diff --git a/resource-manager/containerservice/2025-05-01/agentpools/README.md b/resource-manager/containerservice/2025-05-01/agentpools/README.md new file mode 100644 index 00000000000..7347db33e01 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/README.md @@ -0,0 +1,156 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/agentpools` Documentation + +The `agentpools` SDK allows for interaction with Azure Resource Manager `containerservice` (API Version `2025-05-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/agentpools" +``` + + +### Client Initialization + +```go +client := agentpools.NewAgentPoolsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `AgentPoolsClient.AbortLatestOperation` + +```go +ctx := context.TODO() +id := agentpools.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName") + +if err := client.AbortLatestOperationThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `AgentPoolsClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := agentpools.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName") + +payload := agentpools.AgentPool{ + // ... 
+} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload, agentpools.DefaultCreateOrUpdateOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `AgentPoolsClient.Delete` + +```go +ctx := context.TODO() +id := agentpools.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName") + +if err := client.DeleteThenPoll(ctx, id, agentpools.DefaultDeleteOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `AgentPoolsClient.DeleteMachines` + +```go +ctx := context.TODO() +id := agentpools.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName") + +payload := agentpools.AgentPoolDeleteMachinesParameter{ + // ... +} + + +if err := client.DeleteMachinesThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `AgentPoolsClient.Get` + +```go +ctx := context.TODO() +id := agentpools.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `AgentPoolsClient.GetAvailableAgentPoolVersions` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +read, err := client.GetAvailableAgentPoolVersions(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `AgentPoolsClient.GetUpgradeProfile` + +```go +ctx := context.TODO() +id := agentpools.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName") + +read, err := 
client.GetUpgradeProfile(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `AgentPoolsClient.List` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +// alternatively `client.List(ctx, id)` can be used to do batched pagination +items, err := client.ListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items.Items { + // do something +} +``` + + +### Example Usage: `AgentPoolsClient.UpgradeNodeImageVersion` + +```go +ctx := context.TODO() +id := agentpools.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName") + +if err := client.UpgradeNodeImageVersionThenPoll(ctx, id); err != nil { + // handle the error +} +``` diff --git a/resource-manager/containerservice/2025-05-01/agentpools/client.go b/resource-manager/containerservice/2025-05-01/agentpools/client.go new file mode 100644 index 00000000000..c46e6323495 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/client.go @@ -0,0 +1,26 @@ +package agentpools + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AgentPoolsClient struct { + Client *resourcemanager.Client +} + +func NewAgentPoolsClientWithBaseURI(sdkApi sdkEnv.Api) (*AgentPoolsClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "agentpools", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating AgentPoolsClient: %+v", err) + } + + return &AgentPoolsClient{ + Client: client, + }, nil +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/constants.go b/resource-manager/containerservice/2025-05-01/agentpools/constants.go new file mode 100644 index 00000000000..a11728f1b64 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/constants.go @@ -0,0 +1,693 @@ +package agentpools + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolMode string + +const ( + AgentPoolModeGateway AgentPoolMode = "Gateway" + AgentPoolModeSystem AgentPoolMode = "System" + AgentPoolModeUser AgentPoolMode = "User" +) + +func PossibleValuesForAgentPoolMode() []string { + return []string{ + string(AgentPoolModeGateway), + string(AgentPoolModeSystem), + string(AgentPoolModeUser), + } +} + +func (s *AgentPoolMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAgentPoolMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAgentPoolMode(input string) (*AgentPoolMode, error) { + vals := map[string]AgentPoolMode{ + "gateway": AgentPoolModeGateway, + "system": AgentPoolModeSystem, + "user": AgentPoolModeUser, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AgentPoolMode(input) + 
return &out, nil +} + +type AgentPoolType string + +const ( + AgentPoolTypeAvailabilitySet AgentPoolType = "AvailabilitySet" + AgentPoolTypeVirtualMachineScaleSets AgentPoolType = "VirtualMachineScaleSets" + AgentPoolTypeVirtualMachines AgentPoolType = "VirtualMachines" +) + +func PossibleValuesForAgentPoolType() []string { + return []string{ + string(AgentPoolTypeAvailabilitySet), + string(AgentPoolTypeVirtualMachineScaleSets), + string(AgentPoolTypeVirtualMachines), + } +} + +func (s *AgentPoolType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAgentPoolType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAgentPoolType(input string) (*AgentPoolType, error) { + vals := map[string]AgentPoolType{ + "availabilityset": AgentPoolTypeAvailabilitySet, + "virtualmachinescalesets": AgentPoolTypeVirtualMachineScaleSets, + "virtualmachines": AgentPoolTypeVirtualMachines, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AgentPoolType(input) + return &out, nil +} + +type Code string + +const ( + CodeRunning Code = "Running" + CodeStopped Code = "Stopped" +) + +func PossibleValuesForCode() []string { + return []string{ + string(CodeRunning), + string(CodeStopped), + } +} + +func (s *Code) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCode(input string) (*Code, error) { + vals := map[string]Code{ + "running": CodeRunning, + "stopped": CodeStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + 
return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Code(input) + return &out, nil +} + +type GPUDriver string + +const ( + GPUDriverInstall GPUDriver = "Install" + GPUDriverNone GPUDriver = "None" +) + +func PossibleValuesForGPUDriver() []string { + return []string{ + string(GPUDriverInstall), + string(GPUDriverNone), + } +} + +func (s *GPUDriver) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseGPUDriver(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseGPUDriver(input string) (*GPUDriver, error) { + vals := map[string]GPUDriver{ + "install": GPUDriverInstall, + "none": GPUDriverNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := GPUDriver(input) + return &out, nil +} + +type GPUInstanceProfile string + +const ( + GPUInstanceProfileMIGFourg GPUInstanceProfile = "MIG4g" + GPUInstanceProfileMIGOneg GPUInstanceProfile = "MIG1g" + GPUInstanceProfileMIGSeveng GPUInstanceProfile = "MIG7g" + GPUInstanceProfileMIGThreeg GPUInstanceProfile = "MIG3g" + GPUInstanceProfileMIGTwog GPUInstanceProfile = "MIG2g" +) + +func PossibleValuesForGPUInstanceProfile() []string { + return []string{ + string(GPUInstanceProfileMIGFourg), + string(GPUInstanceProfileMIGOneg), + string(GPUInstanceProfileMIGSeveng), + string(GPUInstanceProfileMIGThreeg), + string(GPUInstanceProfileMIGTwog), + } +} + +func (s *GPUInstanceProfile) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseGPUInstanceProfile(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + 
+func parseGPUInstanceProfile(input string) (*GPUInstanceProfile, error) { + vals := map[string]GPUInstanceProfile{ + "mig4g": GPUInstanceProfileMIGFourg, + "mig1g": GPUInstanceProfileMIGOneg, + "mig7g": GPUInstanceProfileMIGSeveng, + "mig3g": GPUInstanceProfileMIGThreeg, + "mig2g": GPUInstanceProfileMIGTwog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := GPUInstanceProfile(input) + return &out, nil +} + +type KubeletDiskType string + +const ( + KubeletDiskTypeOS KubeletDiskType = "OS" + KubeletDiskTypeTemporary KubeletDiskType = "Temporary" +) + +func PossibleValuesForKubeletDiskType() []string { + return []string{ + string(KubeletDiskTypeOS), + string(KubeletDiskTypeTemporary), + } +} + +func (s *KubeletDiskType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseKubeletDiskType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseKubeletDiskType(input string) (*KubeletDiskType, error) { + vals := map[string]KubeletDiskType{ + "os": KubeletDiskTypeOS, + "temporary": KubeletDiskTypeTemporary, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KubeletDiskType(input) + return &out, nil +} + +type OSDiskType string + +const ( + OSDiskTypeEphemeral OSDiskType = "Ephemeral" + OSDiskTypeManaged OSDiskType = "Managed" +) + +func PossibleValuesForOSDiskType() []string { + return []string{ + string(OSDiskTypeEphemeral), + string(OSDiskTypeManaged), + } +} + +func (s *OSDiskType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := 
parseOSDiskType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOSDiskType(input string) (*OSDiskType, error) { + vals := map[string]OSDiskType{ + "ephemeral": OSDiskTypeEphemeral, + "managed": OSDiskTypeManaged, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSDiskType(input) + return &out, nil +} + +type OSSKU string + +const ( + OSSKUAzureLinux OSSKU = "AzureLinux" + OSSKUCBLMariner OSSKU = "CBLMariner" + OSSKUUbuntu OSSKU = "Ubuntu" + OSSKUUbuntuTwoTwoZeroFour OSSKU = "Ubuntu2204" + OSSKUWindowsTwoZeroOneNine OSSKU = "Windows2019" + OSSKUWindowsTwoZeroTwoTwo OSSKU = "Windows2022" +) + +func PossibleValuesForOSSKU() []string { + return []string{ + string(OSSKUAzureLinux), + string(OSSKUCBLMariner), + string(OSSKUUbuntu), + string(OSSKUUbuntuTwoTwoZeroFour), + string(OSSKUWindowsTwoZeroOneNine), + string(OSSKUWindowsTwoZeroTwoTwo), + } +} + +func (s *OSSKU) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseOSSKU(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOSSKU(input string) (*OSSKU, error) { + vals := map[string]OSSKU{ + "azurelinux": OSSKUAzureLinux, + "cblmariner": OSSKUCBLMariner, + "ubuntu": OSSKUUbuntu, + "ubuntu2204": OSSKUUbuntuTwoTwoZeroFour, + "windows2019": OSSKUWindowsTwoZeroOneNine, + "windows2022": OSSKUWindowsTwoZeroTwoTwo, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSSKU(input) + return &out, nil +} + +type OSType string + +const ( + OSTypeLinux OSType = "Linux" + OSTypeWindows OSType = "Windows" +) + +func PossibleValuesForOSType() []string { + return 
[]string{ + string(OSTypeLinux), + string(OSTypeWindows), + } +} + +func (s *OSType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseOSType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOSType(input string) (*OSType, error) { + vals := map[string]OSType{ + "linux": OSTypeLinux, + "windows": OSTypeWindows, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSType(input) + return &out, nil +} + +type PodIPAllocationMode string + +const ( + PodIPAllocationModeDynamicIndividual PodIPAllocationMode = "DynamicIndividual" + PodIPAllocationModeStaticBlock PodIPAllocationMode = "StaticBlock" +) + +func PossibleValuesForPodIPAllocationMode() []string { + return []string{ + string(PodIPAllocationModeDynamicIndividual), + string(PodIPAllocationModeStaticBlock), + } +} + +func (s *PodIPAllocationMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parsePodIPAllocationMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parsePodIPAllocationMode(input string) (*PodIPAllocationMode, error) { + vals := map[string]PodIPAllocationMode{ + "dynamicindividual": PodIPAllocationModeDynamicIndividual, + "staticblock": PodIPAllocationModeStaticBlock, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := PodIPAllocationMode(input) + return &out, nil +} + +type Protocol string + +const ( + ProtocolTCP Protocol = "TCP" + ProtocolUDP Protocol = "UDP" +) + +func PossibleValuesForProtocol() 
[]string { + return []string{ + string(ProtocolTCP), + string(ProtocolUDP), + } +} + +func (s *Protocol) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProtocol(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProtocol(input string) (*Protocol, error) { + vals := map[string]Protocol{ + "tcp": ProtocolTCP, + "udp": ProtocolUDP, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Protocol(input) + return &out, nil +} + +type ScaleDownMode string + +const ( + ScaleDownModeDeallocate ScaleDownMode = "Deallocate" + ScaleDownModeDelete ScaleDownMode = "Delete" +) + +func PossibleValuesForScaleDownMode() []string { + return []string{ + string(ScaleDownModeDeallocate), + string(ScaleDownModeDelete), + } +} + +func (s *ScaleDownMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScaleDownMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScaleDownMode(input string) (*ScaleDownMode, error) { + vals := map[string]ScaleDownMode{ + "deallocate": ScaleDownModeDeallocate, + "delete": ScaleDownModeDelete, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScaleDownMode(input) + return &out, nil +} + +type ScaleSetEvictionPolicy string + +const ( + ScaleSetEvictionPolicyDeallocate ScaleSetEvictionPolicy = "Deallocate" + ScaleSetEvictionPolicyDelete ScaleSetEvictionPolicy = "Delete" +) + +func PossibleValuesForScaleSetEvictionPolicy() []string { + return []string{ + 
string(ScaleSetEvictionPolicyDeallocate), + string(ScaleSetEvictionPolicyDelete), + } +} + +func (s *ScaleSetEvictionPolicy) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScaleSetEvictionPolicy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScaleSetEvictionPolicy(input string) (*ScaleSetEvictionPolicy, error) { + vals := map[string]ScaleSetEvictionPolicy{ + "deallocate": ScaleSetEvictionPolicyDeallocate, + "delete": ScaleSetEvictionPolicyDelete, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScaleSetEvictionPolicy(input) + return &out, nil +} + +type ScaleSetPriority string + +const ( + ScaleSetPriorityRegular ScaleSetPriority = "Regular" + ScaleSetPrioritySpot ScaleSetPriority = "Spot" +) + +func PossibleValuesForScaleSetPriority() []string { + return []string{ + string(ScaleSetPriorityRegular), + string(ScaleSetPrioritySpot), + } +} + +func (s *ScaleSetPriority) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScaleSetPriority(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScaleSetPriority(input string) (*ScaleSetPriority, error) { + vals := map[string]ScaleSetPriority{ + "regular": ScaleSetPriorityRegular, + "spot": ScaleSetPrioritySpot, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScaleSetPriority(input) + return &out, nil +} + +type UndrainableNodeBehavior string + +const ( + UndrainableNodeBehaviorCordon UndrainableNodeBehavior = 
"Cordon" + UndrainableNodeBehaviorSchedule UndrainableNodeBehavior = "Schedule" +) + +func PossibleValuesForUndrainableNodeBehavior() []string { + return []string{ + string(UndrainableNodeBehaviorCordon), + string(UndrainableNodeBehaviorSchedule), + } +} + +func (s *UndrainableNodeBehavior) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseUndrainableNodeBehavior(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseUndrainableNodeBehavior(input string) (*UndrainableNodeBehavior, error) { + vals := map[string]UndrainableNodeBehavior{ + "cordon": UndrainableNodeBehaviorCordon, + "schedule": UndrainableNodeBehaviorSchedule, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UndrainableNodeBehavior(input) + return &out, nil +} + +type WorkloadRuntime string + +const ( + WorkloadRuntimeOCIContainer WorkloadRuntime = "OCIContainer" + WorkloadRuntimeWasmWasi WorkloadRuntime = "WasmWasi" +) + +func PossibleValuesForWorkloadRuntime() []string { + return []string{ + string(WorkloadRuntimeOCIContainer), + string(WorkloadRuntimeWasmWasi), + } +} + +func (s *WorkloadRuntime) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseWorkloadRuntime(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseWorkloadRuntime(input string) (*WorkloadRuntime, error) { + vals := map[string]WorkloadRuntime{ + "ocicontainer": WorkloadRuntimeOCIContainer, + "wasmwasi": WorkloadRuntimeWasmWasi, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an 
undefined value and best-effort it + out := WorkloadRuntime(input) + return &out, nil +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/id_agentpool.go b/resource-manager/containerservice/2025-05-01/agentpools/id_agentpool.go new file mode 100644 index 00000000000..d1db170c217 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/id_agentpool.go @@ -0,0 +1,139 @@ +package agentpools + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&AgentPoolId{}) +} + +var _ resourceids.ResourceId = &AgentPoolId{} + +// AgentPoolId is a struct representing the Resource ID for a Agent Pool +type AgentPoolId struct { + SubscriptionId string + ResourceGroupName string + ManagedClusterName string + AgentPoolName string +} + +// NewAgentPoolID returns a new AgentPoolId struct +func NewAgentPoolID(subscriptionId string, resourceGroupName string, managedClusterName string, agentPoolName string) AgentPoolId { + return AgentPoolId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ManagedClusterName: managedClusterName, + AgentPoolName: agentPoolName, + } +} + +// ParseAgentPoolID parses 'input' into a AgentPoolId +func ParseAgentPoolID(input string) (*AgentPoolId, error) { + parser := resourceids.NewParserFromResourceIdType(&AgentPoolId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := AgentPoolId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseAgentPoolIDInsensitively parses 'input' case-insensitively into a AgentPoolId +// note: this method should 
only be used for API response data and not user input +func ParseAgentPoolIDInsensitively(input string) (*AgentPoolId, error) { + parser := resourceids.NewParserFromResourceIdType(&AgentPoolId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := AgentPoolId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *AgentPoolId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ManagedClusterName, ok = input.Parsed["managedClusterName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "managedClusterName", input) + } + + if id.AgentPoolName, ok = input.Parsed["agentPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "agentPoolName", input) + } + + return nil +} + +// ValidateAgentPoolID checks that 'input' can be parsed as a Agent Pool ID +func ValidateAgentPoolID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseAgentPoolID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Agent Pool ID +func (id AgentPoolId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s/agentPools/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ManagedClusterName, id.AgentPoolName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Agent Pool ID +func (id 
AgentPoolId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("managedClusterName", "managedClusterName"), + resourceids.StaticSegment("staticAgentPools", "agentPools", "agentPools"), + resourceids.UserSpecifiedSegment("agentPoolName", "agentPoolName"), + } +} + +// String returns a human-readable description of this Agent Pool ID +func (id AgentPoolId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Managed Cluster Name: %q", id.ManagedClusterName), + fmt.Sprintf("Agent Pool Name: %q", id.AgentPoolName), + } + return fmt.Sprintf("Agent Pool (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/id_agentpool_test.go b/resource-manager/containerservice/2025-05-01/agentpools/id_agentpool_test.go new file mode 100644 index 00000000000..c11edfbefce --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/id_agentpool_test.go @@ -0,0 +1,327 @@ +package agentpools + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &AgentPoolId{} + +func TestNewAgentPoolID(t *testing.T) { + id := NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.ManagedClusterName != "managedClusterName" { + t.Fatalf("Expected %q but got %q for Segment 'ManagedClusterName'", id.ManagedClusterName, "managedClusterName") + } + + if id.AgentPoolName != "agentPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'AgentPoolName'", id.AgentPoolName, "agentPoolName") + } +} + +func TestFormatAgentPoolID(t *testing.T) { + actual := NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseAgentPoolID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AgentPoolId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName", + Expected: &AgentPoolId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + AgentPoolName: "agentPoolName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseAgentPoolID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a 
value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.AgentPoolName != v.Expected.AgentPoolName { + t.Fatalf("Expected %q but got %q for AgentPoolName", v.Expected.AgentPoolName, actual.AgentPoolName) + } + + } +} + +func TestParseAgentPoolIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AgentPoolId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools", + Error: 
true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aGeNtPoOlS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName", + Expected: &AgentPoolId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + AgentPoolName: "agentPoolName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aGeNtPoOlS/aGeNtPoOlNaMe", + Expected: &AgentPoolId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + ManagedClusterName: "mAnAgEdClUsTeRnAmE", + AgentPoolName: "aGeNtPoOlNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aGeNtPoOlS/aGeNtPoOlNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseAgentPoolIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an 
error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.AgentPoolName != v.Expected.AgentPoolName { + t.Fatalf("Expected %q but got %q for AgentPoolName", v.Expected.AgentPoolName, actual.AgentPoolName) + } + + } +} + +func TestSegmentsForAgentPoolId(t *testing.T) { + segments := AgentPoolId{}.Segments() + if len(segments) == 0 { + t.Fatalf("AgentPoolId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/method_abortlatestoperation.go b/resource-manager/containerservice/2025-05-01/agentpools/method_abortlatestoperation.go new file mode 100644 index 00000000000..bb6d06d7487 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/method_abortlatestoperation.go @@ -0,0 +1,70 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AbortLatestOperationOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// AbortLatestOperation ... +func (c AgentPoolsClient) AbortLatestOperation(ctx context.Context, id AgentPoolId) (result AbortLatestOperationOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/abort", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// AbortLatestOperationThenPoll performs AbortLatestOperation then polls until it's completed +func (c AgentPoolsClient) AbortLatestOperationThenPoll(ctx context.Context, id AgentPoolId) error { + result, err := c.AbortLatestOperation(ctx, id) + if err != nil { + return fmt.Errorf("performing AbortLatestOperation: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after AbortLatestOperation: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/method_createorupdate.go b/resource-manager/containerservice/2025-05-01/agentpools/method_createorupdate.go new file mode 100644 index 00000000000..ac21aa2c854 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/method_createorupdate.go @@ -0,0 +1,108 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *AgentPool +} + +type CreateOrUpdateOperationOptions struct { + IfMatch *string + IfNoneMatch *string +} + +func DefaultCreateOrUpdateOperationOptions() CreateOrUpdateOperationOptions { + return CreateOrUpdateOperationOptions{} +} + +func (o CreateOrUpdateOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + if o.IfMatch != nil { + out.Append("If-Match", fmt.Sprintf("%v", *o.IfMatch)) + } + if o.IfNoneMatch != nil { + out.Append("If-None-Match", fmt.Sprintf("%v", *o.IfNoneMatch)) + } + return &out +} + +func (o CreateOrUpdateOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o CreateOrUpdateOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + + return &out +} + +// CreateOrUpdate ... 
+func (c AgentPoolsClient) CreateOrUpdate(ctx context.Context, id AgentPoolId, input AgentPool, options CreateOrUpdateOperationOptions) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c AgentPoolsClient) CreateOrUpdateThenPoll(ctx context.Context, id AgentPoolId, input AgentPool, options CreateOrUpdateOperationOptions) error { + result, err := c.CreateOrUpdate(ctx, id, input, options) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/method_delete.go b/resource-manager/containerservice/2025-05-01/agentpools/method_delete.go new file mode 100644 index 00000000000..1e402c3c3f4 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/method_delete.go @@ -0,0 +1,102 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + 
+// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +type DeleteOperationOptions struct { + IfMatch *string + IgnorePodDisruptionBudget *bool +} + +func DefaultDeleteOperationOptions() DeleteOperationOptions { + return DeleteOperationOptions{} +} + +func (o DeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + if o.IfMatch != nil { + out.Append("If-Match", fmt.Sprintf("%v", *o.IfMatch)) + } + return &out +} + +func (o DeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o DeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.IgnorePodDisruptionBudget != nil { + out.Append("ignore-pod-disruption-budget", fmt.Sprintf("%v", *o.IgnorePodDisruptionBudget)) + } + return &out +} + +// Delete ... 
+func (c AgentPoolsClient) Delete(ctx context.Context, id AgentPoolId, options DeleteOperationOptions) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c AgentPoolsClient) DeleteThenPoll(ctx context.Context, id AgentPoolId, options DeleteOperationOptions) error { + result, err := c.Delete(ctx, id, options) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/method_deletemachines.go b/resource-manager/containerservice/2025-05-01/agentpools/method_deletemachines.go new file mode 100644 index 00000000000..b07d10227fb --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/method_deletemachines.go @@ -0,0 +1,73 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type DeleteMachinesOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// DeleteMachines ... +func (c AgentPoolsClient) DeleteMachines(ctx context.Context, id AgentPoolId, input AgentPoolDeleteMachinesParameter) (result DeleteMachinesOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/deleteMachines", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteMachinesThenPoll performs DeleteMachines then polls until it's completed +func (c AgentPoolsClient) DeleteMachinesThenPoll(ctx context.Context, id AgentPoolId, input AgentPoolDeleteMachinesParameter) error { + result, err := c.DeleteMachines(ctx, id, input) + if err != nil { + return fmt.Errorf("performing DeleteMachines: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after DeleteMachines: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/method_get.go b/resource-manager/containerservice/2025-05-01/agentpools/method_get.go new file mode 100644 index 00000000000..ea20389e4a3 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/method_get.go @@ -0,0 +1,53 @@ +package agentpools + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *AgentPool +} + +// Get ... +func (c AgentPoolsClient) Get(ctx context.Context, id AgentPoolId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model AgentPool + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/method_getavailableagentpoolversions.go b/resource-manager/containerservice/2025-05-01/agentpools/method_getavailableagentpoolversions.go new file mode 100644 index 00000000000..685d4c2c2ec --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/method_getavailableagentpoolversions.go @@ -0,0 +1,55 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetAvailableAgentPoolVersionsOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *AgentPoolAvailableVersions +} + +// GetAvailableAgentPoolVersions ... 
+func (c AgentPoolsClient) GetAvailableAgentPoolVersions(ctx context.Context, id commonids.KubernetesClusterId) (result GetAvailableAgentPoolVersionsOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/availableAgentPoolVersions", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model AgentPoolAvailableVersions + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/method_getupgradeprofile.go b/resource-manager/containerservice/2025-05-01/agentpools/method_getupgradeprofile.go new file mode 100644 index 00000000000..1810ae898b6 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/method_getupgradeprofile.go @@ -0,0 +1,54 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUpgradeProfileOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *AgentPoolUpgradeProfile +} + +// GetUpgradeProfile ... 
+func (c AgentPoolsClient) GetUpgradeProfile(ctx context.Context, id AgentPoolId) (result GetUpgradeProfileOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/upgradeProfiles/default", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model AgentPoolUpgradeProfile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/method_list.go b/resource-manager/containerservice/2025-05-01/agentpools/method_list.go new file mode 100644 index 00000000000..63350a29f6d --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/method_list.go @@ -0,0 +1,106 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]AgentPool +} + +type ListCompleteResult struct { + LatestHttpResponse *http.Response + Items []AgentPool +} + +type ListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// List ... 
+func (c AgentPoolsClient) List(ctx context.Context, id commonids.KubernetesClusterId) (result ListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListCustomPager{}, + Path: fmt.Sprintf("%s/agentPools", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]AgentPool `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListComplete retrieves all the results into a single object +func (c AgentPoolsClient) ListComplete(ctx context.Context, id commonids.KubernetesClusterId) (ListCompleteResult, error) { + return c.ListCompleteMatchingPredicate(ctx, id, AgentPoolOperationPredicate{}) +} + +// ListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c AgentPoolsClient) ListCompleteMatchingPredicate(ctx context.Context, id commonids.KubernetesClusterId, predicate AgentPoolOperationPredicate) (result ListCompleteResult, err error) { + items := make([]AgentPool, 0) + + resp, err := c.List(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/method_upgradenodeimageversion.go b/resource-manager/containerservice/2025-05-01/agentpools/method_upgradenodeimageversion.go new 
file mode 100644 index 00000000000..fa6b0240a55 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/method_upgradenodeimageversion.go @@ -0,0 +1,71 @@ +package agentpools + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpgradeNodeImageVersionOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *AgentPool +} + +// UpgradeNodeImageVersion ... +func (c AgentPoolsClient) UpgradeNodeImageVersion(ctx context.Context, id AgentPoolId) (result UpgradeNodeImageVersionOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/upgradeNodeImageVersion", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UpgradeNodeImageVersionThenPoll performs UpgradeNodeImageVersion then polls until it's completed +func (c AgentPoolsClient) UpgradeNodeImageVersionThenPoll(ctx context.Context, id AgentPoolId) error { + result, err := c.UpgradeNodeImageVersion(ctx, id) + if err != nil { + return fmt.Errorf("performing UpgradeNodeImageVersion: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err 
!= nil { + return fmt.Errorf("polling after UpgradeNodeImageVersion: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpool.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpool.go new file mode 100644 index 00000000000..c67ae33e826 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpool.go @@ -0,0 +1,11 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPool struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *ManagedClusterAgentPoolProfileProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolavailableversions.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolavailableversions.go new file mode 100644 index 00000000000..0b65a05c663 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolavailableversions.go @@ -0,0 +1,11 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AgentPoolAvailableVersions struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties AgentPoolAvailableVersionsProperties `json:"properties"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolavailableversionsproperties.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolavailableversionsproperties.go new file mode 100644 index 00000000000..a8369deaba7 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolavailableversionsproperties.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolAvailableVersionsProperties struct { + AgentPoolVersions *[]AgentPoolAvailableVersionsPropertiesAgentPoolVersionsInlined `json:"agentPoolVersions,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolavailableversionspropertiesagentpoolversionsinlined.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolavailableversionspropertiesagentpoolversionsinlined.go new file mode 100644 index 00000000000..1631b3137d1 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolavailableversionspropertiesagentpoolversionsinlined.go @@ -0,0 +1,10 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AgentPoolAvailableVersionsPropertiesAgentPoolVersionsInlined struct { + Default *bool `json:"default,omitempty"` + IsPreview *bool `json:"isPreview,omitempty"` + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpooldeletemachinesparameter.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpooldeletemachinesparameter.go new file mode 100644 index 00000000000..f065e76b7a4 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpooldeletemachinesparameter.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolDeleteMachinesParameter struct { + MachineNames []string `json:"machineNames"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolgatewayprofile.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolgatewayprofile.go new file mode 100644 index 00000000000..ec0ff45ac3b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolgatewayprofile.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AgentPoolGatewayProfile struct { + PublicIPPrefixSize *int64 `json:"publicIPPrefixSize,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolnetworkprofile.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolnetworkprofile.go new file mode 100644 index 00000000000..db217a29cc5 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolnetworkprofile.go @@ -0,0 +1,10 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolNetworkProfile struct { + AllowedHostPorts *[]PortRange `json:"allowedHostPorts,omitempty"` + ApplicationSecurityGroups *[]string `json:"applicationSecurityGroups,omitempty"` + NodePublicIPTags *[]IPTag `json:"nodePublicIPTags,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolsecurityprofile.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolsecurityprofile.go new file mode 100644 index 00000000000..78ac43a520f --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolsecurityprofile.go @@ -0,0 +1,9 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AgentPoolSecurityProfile struct { + EnableSecureBoot *bool `json:"enableSecureBoot,omitempty"` + EnableVTPM *bool `json:"enableVTPM,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolstatus.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolstatus.go new file mode 100644 index 00000000000..3ead0562245 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolstatus.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolStatus struct { + ProvisioningError *CloudErrorBody `json:"provisioningError,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolupgradeprofile.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolupgradeprofile.go new file mode 100644 index 00000000000..23cc2927f28 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolupgradeprofile.go @@ -0,0 +1,11 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AgentPoolUpgradeProfile struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties AgentPoolUpgradeProfileProperties `json:"properties"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolupgradeprofileproperties.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolupgradeprofileproperties.go new file mode 100644 index 00000000000..5df00c96762 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolupgradeprofileproperties.go @@ -0,0 +1,11 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolUpgradeProfileProperties struct { + KubernetesVersion string `json:"kubernetesVersion"` + LatestNodeImageVersion *string `json:"latestNodeImageVersion,omitempty"` + OsType OSType `json:"osType"` + Upgrades *[]AgentPoolUpgradeProfilePropertiesUpgradesInlined `json:"upgrades,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolupgradeprofilepropertiesupgradesinlined.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolupgradeprofilepropertiesupgradesinlined.go new file mode 100644 index 00000000000..b45442f6cf0 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolupgradeprofilepropertiesupgradesinlined.go @@ -0,0 +1,9 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AgentPoolUpgradeProfilePropertiesUpgradesInlined struct { + IsPreview *bool `json:"isPreview,omitempty"` + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolupgradesettings.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolupgradesettings.go new file mode 100644 index 00000000000..f6817a4838c --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolupgradesettings.go @@ -0,0 +1,12 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolUpgradeSettings struct { + DrainTimeoutInMinutes *int64 `json:"drainTimeoutInMinutes,omitempty"` + MaxSurge *string `json:"maxSurge,omitempty"` + MaxUnavailable *string `json:"maxUnavailable,omitempty"` + NodeSoakDurationInMinutes *int64 `json:"nodeSoakDurationInMinutes,omitempty"` + UndrainableNodeBehavior *UndrainableNodeBehavior `json:"undrainableNodeBehavior,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolwindowsprofile.go b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolwindowsprofile.go new file mode 100644 index 00000000000..d7ad07f7f69 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_agentpoolwindowsprofile.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AgentPoolWindowsProfile struct { + DisableOutboundNat *bool `json:"disableOutboundNat,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_clouderrorbody.go b/resource-manager/containerservice/2025-05-01/agentpools/model_clouderrorbody.go new file mode 100644 index 00000000000..946f90d4f23 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_clouderrorbody.go @@ -0,0 +1,11 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CloudErrorBody struct { + Code *string `json:"code,omitempty"` + Details *[]CloudErrorBody `json:"details,omitempty"` + Message *string `json:"message,omitempty"` + Target *string `json:"target,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_creationdata.go b/resource-manager/containerservice/2025-05-01/agentpools/model_creationdata.go new file mode 100644 index 00000000000..88a8fe8123b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_creationdata.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreationData struct { + SourceResourceId *string `json:"sourceResourceId,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_gpuprofile.go b/resource-manager/containerservice/2025-05-01/agentpools/model_gpuprofile.go new file mode 100644 index 00000000000..89ac80213cf --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_gpuprofile.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GPUProfile struct { + Driver *GPUDriver `json:"driver,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_iptag.go b/resource-manager/containerservice/2025-05-01/agentpools/model_iptag.go new file mode 100644 index 00000000000..8a805afc241 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_iptag.go @@ -0,0 +1,9 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type IPTag struct { + IPTagType *string `json:"ipTagType,omitempty"` + Tag *string `json:"tag,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_kubeletconfig.go b/resource-manager/containerservice/2025-05-01/agentpools/model_kubeletconfig.go new file mode 100644 index 00000000000..7d56f79880c --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_kubeletconfig.go @@ -0,0 +1,18 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type KubeletConfig struct { + AllowedUnsafeSysctls *[]string `json:"allowedUnsafeSysctls,omitempty"` + ContainerLogMaxFiles *int64 `json:"containerLogMaxFiles,omitempty"` + ContainerLogMaxSizeMB *int64 `json:"containerLogMaxSizeMB,omitempty"` + CpuCfsQuota *bool `json:"cpuCfsQuota,omitempty"` + CpuCfsQuotaPeriod *string `json:"cpuCfsQuotaPeriod,omitempty"` + CpuManagerPolicy *string `json:"cpuManagerPolicy,omitempty"` + FailSwapOn *bool `json:"failSwapOn,omitempty"` + ImageGcHighThreshold *int64 `json:"imageGcHighThreshold,omitempty"` + ImageGcLowThreshold *int64 `json:"imageGcLowThreshold,omitempty"` + PodMaxPids *int64 `json:"podMaxPids,omitempty"` + TopologyManagerPolicy *string `json:"topologyManagerPolicy,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_linuxosconfig.go b/resource-manager/containerservice/2025-05-01/agentpools/model_linuxosconfig.go new file mode 100644 index 00000000000..8256b210a85 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_linuxosconfig.go @@ -0,0 +1,11 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type LinuxOSConfig struct { + SwapFileSizeMB *int64 `json:"swapFileSizeMB,omitempty"` + Sysctls *SysctlConfig `json:"sysctls,omitempty"` + TransparentHugePageDefrag *string `json:"transparentHugePageDefrag,omitempty"` + TransparentHugePageEnabled *string `json:"transparentHugePageEnabled,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_managedclusteragentpoolprofileproperties.go b/resource-manager/containerservice/2025-05-01/agentpools/model_managedclusteragentpoolprofileproperties.go new file mode 100644 index 00000000000..119fc362986 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_managedclusteragentpoolprofileproperties.go @@ -0,0 +1,64 @@ +package agentpools + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/zones" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterAgentPoolProfileProperties struct { + AvailabilityZones *zones.Schema `json:"availabilityZones,omitempty"` + CapacityReservationGroupID *string `json:"capacityReservationGroupID,omitempty"` + Count *int64 `json:"count,omitempty"` + CreationData *CreationData `json:"creationData,omitempty"` + CurrentOrchestratorVersion *string `json:"currentOrchestratorVersion,omitempty"` + ETag *string `json:"eTag,omitempty"` + EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"` + EnableEncryptionAtHost *bool `json:"enableEncryptionAtHost,omitempty"` + EnableFIPS *bool `json:"enableFIPS,omitempty"` + EnableNodePublicIP *bool `json:"enableNodePublicIP,omitempty"` + EnableUltraSSD *bool `json:"enableUltraSSD,omitempty"` + GatewayProfile *AgentPoolGatewayProfile `json:"gatewayProfile,omitempty"` + GpuInstanceProfile *GPUInstanceProfile `json:"gpuInstanceProfile,omitempty"` + GpuProfile *GPUProfile `json:"gpuProfile,omitempty"` + HostGroupID *string `json:"hostGroupID,omitempty"` + 
KubeletConfig *KubeletConfig `json:"kubeletConfig,omitempty"` + KubeletDiskType *KubeletDiskType `json:"kubeletDiskType,omitempty"` + LinuxOSConfig *LinuxOSConfig `json:"linuxOSConfig,omitempty"` + MaxCount *int64 `json:"maxCount,omitempty"` + MaxPods *int64 `json:"maxPods,omitempty"` + MessageOfTheDay *string `json:"messageOfTheDay,omitempty"` + MinCount *int64 `json:"minCount,omitempty"` + Mode *AgentPoolMode `json:"mode,omitempty"` + NetworkProfile *AgentPoolNetworkProfile `json:"networkProfile,omitempty"` + NodeImageVersion *string `json:"nodeImageVersion,omitempty"` + NodeLabels *map[string]string `json:"nodeLabels,omitempty"` + NodePublicIPPrefixID *string `json:"nodePublicIPPrefixID,omitempty"` + NodeTaints *[]string `json:"nodeTaints,omitempty"` + OrchestratorVersion *string `json:"orchestratorVersion,omitempty"` + OsDiskSizeGB *int64 `json:"osDiskSizeGB,omitempty"` + OsDiskType *OSDiskType `json:"osDiskType,omitempty"` + OsSKU *OSSKU `json:"osSKU,omitempty"` + OsType *OSType `json:"osType,omitempty"` + PodIPAllocationMode *PodIPAllocationMode `json:"podIPAllocationMode,omitempty"` + PodSubnetID *string `json:"podSubnetID,omitempty"` + PowerState *PowerState `json:"powerState,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + ProximityPlacementGroupID *string `json:"proximityPlacementGroupID,omitempty"` + ScaleDownMode *ScaleDownMode `json:"scaleDownMode,omitempty"` + ScaleSetEvictionPolicy *ScaleSetEvictionPolicy `json:"scaleSetEvictionPolicy,omitempty"` + ScaleSetPriority *ScaleSetPriority `json:"scaleSetPriority,omitempty"` + SecurityProfile *AgentPoolSecurityProfile `json:"securityProfile,omitempty"` + SpotMaxPrice *float64 `json:"spotMaxPrice,omitempty"` + Status *AgentPoolStatus `json:"status,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *AgentPoolType `json:"type,omitempty"` + UpgradeSettings *AgentPoolUpgradeSettings `json:"upgradeSettings,omitempty"` + VMSize *string `json:"vmSize,omitempty"` + 
VirtualMachineNodesStatus *[]VirtualMachineNodes `json:"virtualMachineNodesStatus,omitempty"` + VirtualMachinesProfile *VirtualMachinesProfile `json:"virtualMachinesProfile,omitempty"` + VnetSubnetID *string `json:"vnetSubnetID,omitempty"` + WindowsProfile *AgentPoolWindowsProfile `json:"windowsProfile,omitempty"` + WorkloadRuntime *WorkloadRuntime `json:"workloadRuntime,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_manualscaleprofile.go b/resource-manager/containerservice/2025-05-01/agentpools/model_manualscaleprofile.go new file mode 100644 index 00000000000..7afcad5f2ea --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_manualscaleprofile.go @@ -0,0 +1,9 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManualScaleProfile struct { + Count *int64 `json:"count,omitempty"` + Size *string `json:"size,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_portrange.go b/resource-manager/containerservice/2025-05-01/agentpools/model_portrange.go new file mode 100644 index 00000000000..45b2a562032 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_portrange.go @@ -0,0 +1,10 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type PortRange struct { + PortEnd *int64 `json:"portEnd,omitempty"` + PortStart *int64 `json:"portStart,omitempty"` + Protocol *Protocol `json:"protocol,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_powerstate.go b/resource-manager/containerservice/2025-05-01/agentpools/model_powerstate.go new file mode 100644 index 00000000000..c5939d90672 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_powerstate.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PowerState struct { + Code *Code `json:"code,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_scaleprofile.go b/resource-manager/containerservice/2025-05-01/agentpools/model_scaleprofile.go new file mode 100644 index 00000000000..1b9ded37afc --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_scaleprofile.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ScaleProfile struct { + Manual *[]ManualScaleProfile `json:"manual,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_sysctlconfig.go b/resource-manager/containerservice/2025-05-01/agentpools/model_sysctlconfig.go new file mode 100644 index 00000000000..aa739bf4f68 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_sysctlconfig.go @@ -0,0 +1,35 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SysctlConfig struct { + FsAioMaxNr *int64 `json:"fsAioMaxNr,omitempty"` + FsFileMax *int64 `json:"fsFileMax,omitempty"` + FsInotifyMaxUserWatches *int64 `json:"fsInotifyMaxUserWatches,omitempty"` + FsNrOpen *int64 `json:"fsNrOpen,omitempty"` + KernelThreadsMax *int64 `json:"kernelThreadsMax,omitempty"` + NetCoreNetdevMaxBacklog *int64 `json:"netCoreNetdevMaxBacklog,omitempty"` + NetCoreOptmemMax *int64 `json:"netCoreOptmemMax,omitempty"` + NetCoreRmemDefault *int64 `json:"netCoreRmemDefault,omitempty"` + NetCoreRmemMax *int64 `json:"netCoreRmemMax,omitempty"` + NetCoreSomaxconn *int64 `json:"netCoreSomaxconn,omitempty"` + NetCoreWmemDefault *int64 `json:"netCoreWmemDefault,omitempty"` + NetCoreWmemMax *int64 `json:"netCoreWmemMax,omitempty"` + NetIPv4IPLocalPortRange *string `json:"netIpv4IpLocalPortRange,omitempty"` + NetIPv4NeighDefaultGcThresh1 *int64 `json:"netIpv4NeighDefaultGcThresh1,omitempty"` + NetIPv4NeighDefaultGcThresh2 *int64 `json:"netIpv4NeighDefaultGcThresh2,omitempty"` + NetIPv4NeighDefaultGcThresh3 *int64 `json:"netIpv4NeighDefaultGcThresh3,omitempty"` + NetIPv4TcpFinTimeout *int64 `json:"netIpv4TcpFinTimeout,omitempty"` + NetIPv4TcpKeepaliveProbes *int64 `json:"netIpv4TcpKeepaliveProbes,omitempty"` + NetIPv4TcpKeepaliveTime *int64 `json:"netIpv4TcpKeepaliveTime,omitempty"` + NetIPv4TcpMaxSynBacklog *int64 `json:"netIpv4TcpMaxSynBacklog,omitempty"` + NetIPv4TcpMaxTwBuckets *int64 `json:"netIpv4TcpMaxTwBuckets,omitempty"` + NetIPv4TcpTwReuse *bool `json:"netIpv4TcpTwReuse,omitempty"` + NetIPv4TcpkeepaliveIntvl *int64 `json:"netIpv4TcpkeepaliveIntvl,omitempty"` + NetNetfilterNfConntrackBuckets *int64 `json:"netNetfilterNfConntrackBuckets,omitempty"` + NetNetfilterNfConntrackMax *int64 `json:"netNetfilterNfConntrackMax,omitempty"` + VMMaxMapCount *int64 `json:"vmMaxMapCount,omitempty"` + VMSwappiness *int64 `json:"vmSwappiness,omitempty"` + VMVfsCachePressure *int64 `json:"vmVfsCachePressure,omitempty"` +} diff --git 
a/resource-manager/containerservice/2025-05-01/agentpools/model_virtualmachinenodes.go b/resource-manager/containerservice/2025-05-01/agentpools/model_virtualmachinenodes.go new file mode 100644 index 00000000000..769be8c237a --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_virtualmachinenodes.go @@ -0,0 +1,9 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VirtualMachineNodes struct { + Count *int64 `json:"count,omitempty"` + Size *string `json:"size,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/model_virtualmachinesprofile.go b/resource-manager/containerservice/2025-05-01/agentpools/model_virtualmachinesprofile.go new file mode 100644 index 00000000000..11648990094 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/model_virtualmachinesprofile.go @@ -0,0 +1,8 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VirtualMachinesProfile struct { + Scale *ScaleProfile `json:"scale,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/predicates.go b/resource-manager/containerservice/2025-05-01/agentpools/predicates.go new file mode 100644 index 00000000000..94a2db5c63b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/predicates.go @@ -0,0 +1,27 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AgentPoolOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p AgentPoolOperationPredicate) Matches(input AgentPool) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/containerservice/2025-05-01/agentpools/version.go b/resource-manager/containerservice/2025-05-01/agentpools/version.go new file mode 100644 index 00000000000..8592adb1654 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/agentpools/version.go @@ -0,0 +1,10 @@ +package agentpools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-05-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/agentpools/2025-05-01" +} diff --git a/resource-manager/containerservice/2025-05-01/client.go b/resource-manager/containerservice/2025-05-01/client.go new file mode 100644 index 00000000000..d222465d033 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/client.go @@ -0,0 +1,100 @@ +package v2025_05_01 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/agentpools" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/machines" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/maintenanceconfigurations" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/managedclusters" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/privateendpointconnections" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/privatelinkresources" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/snapshots" + "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/trustedaccess" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +type Client struct { + AgentPools *agentpools.AgentPoolsClient + Machines *machines.MachinesClient + MaintenanceConfigurations *maintenanceconfigurations.MaintenanceConfigurationsClient + ManagedClusters *managedclusters.ManagedClustersClient + PrivateEndpointConnections *privateendpointconnections.PrivateEndpointConnectionsClient + PrivateLinkResources *privatelinkresources.PrivateLinkResourcesClient + ResolvePrivateLinkServiceId *resolveprivatelinkserviceid.ResolvePrivateLinkServiceIdClient + Snapshots *snapshots.SnapshotsClient + TrustedAccess *trustedaccess.TrustedAccessClient +} + +func NewClientWithBaseURI(sdkApi sdkEnv.Api, configureFunc func(c *resourcemanager.Client)) (*Client, error) { + agentPoolsClient, err := agentpools.NewAgentPoolsClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building AgentPools client: %+v", err) + } + configureFunc(agentPoolsClient.Client) + + machinesClient, err 
:= machines.NewMachinesClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building Machines client: %+v", err) + } + configureFunc(machinesClient.Client) + + maintenanceConfigurationsClient, err := maintenanceconfigurations.NewMaintenanceConfigurationsClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building MaintenanceConfigurations client: %+v", err) + } + configureFunc(maintenanceConfigurationsClient.Client) + + managedClustersClient, err := managedclusters.NewManagedClustersClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building ManagedClusters client: %+v", err) + } + configureFunc(managedClustersClient.Client) + + privateEndpointConnectionsClient, err := privateendpointconnections.NewPrivateEndpointConnectionsClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building PrivateEndpointConnections client: %+v", err) + } + configureFunc(privateEndpointConnectionsClient.Client) + + privateLinkResourcesClient, err := privatelinkresources.NewPrivateLinkResourcesClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building PrivateLinkResources client: %+v", err) + } + configureFunc(privateLinkResourcesClient.Client) + + resolvePrivateLinkServiceIdClient, err := resolveprivatelinkserviceid.NewResolvePrivateLinkServiceIdClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building ResolvePrivateLinkServiceId client: %+v", err) + } + configureFunc(resolvePrivateLinkServiceIdClient.Client) + + snapshotsClient, err := snapshots.NewSnapshotsClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building Snapshots client: %+v", err) + } + configureFunc(snapshotsClient.Client) + + trustedAccessClient, err := trustedaccess.NewTrustedAccessClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building TrustedAccess client: %+v", err) + } + configureFunc(trustedAccessClient.Client) + + return &Client{ + AgentPools: agentPoolsClient, 
+ Machines: machinesClient, + MaintenanceConfigurations: maintenanceConfigurationsClient, + ManagedClusters: managedClustersClient, + PrivateEndpointConnections: privateEndpointConnectionsClient, + PrivateLinkResources: privateLinkResourcesClient, + ResolvePrivateLinkServiceId: resolvePrivateLinkServiceIdClient, + Snapshots: snapshotsClient, + TrustedAccess: trustedAccessClient, + }, nil +} diff --git a/resource-manager/containerservice/2025-05-01/machines/README.md b/resource-manager/containerservice/2025-05-01/machines/README.md new file mode 100644 index 00000000000..86dbc5cbcc1 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/README.md @@ -0,0 +1,53 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/machines` Documentation + +The `machines` SDK allows for interaction with Azure Resource Manager `containerservice` (API Version `2025-05-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/machines" +``` + + +### Client Initialization + +```go +client := machines.NewMachinesClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `MachinesClient.Get` + +```go +ctx := context.TODO() +id := machines.NewMachineID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName", "machineName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `MachinesClient.List` + +```go +ctx := context.TODO() +id := machines.NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName") + +// alternatively `client.List(ctx, id)` can be used to do batched pagination +items, err := client.ListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/resource-manager/containerservice/2025-05-01/machines/client.go b/resource-manager/containerservice/2025-05-01/machines/client.go new file mode 100644 index 00000000000..fcd8a61002f --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/client.go @@ -0,0 +1,26 @@ +package machines + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MachinesClient struct { + Client *resourcemanager.Client +} + +func NewMachinesClientWithBaseURI(sdkApi sdkEnv.Api) (*MachinesClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "machines", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating MachinesClient: %+v", err) + } + + return &MachinesClient{ + Client: client, + }, nil +} diff --git a/resource-manager/containerservice/2025-05-01/machines/constants.go b/resource-manager/containerservice/2025-05-01/machines/constants.go new file mode 100644 index 00000000000..3401f05d4e3 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/constants.go @@ -0,0 +1,51 @@ +package machines + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type IPFamily string + +const ( + IPFamilyIPvFour IPFamily = "IPv4" + IPFamilyIPvSix IPFamily = "IPv6" +) + +func PossibleValuesForIPFamily() []string { + return []string{ + string(IPFamilyIPvFour), + string(IPFamilyIPvSix), + } +} + +func (s *IPFamily) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseIPFamily(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseIPFamily(input string) (*IPFamily, error) { + vals := map[string]IPFamily{ + "ipv4": IPFamilyIPvFour, + "ipv6": IPFamilyIPvSix, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := IPFamily(input) + return &out, nil +} diff --git a/resource-manager/containerservice/2025-05-01/machines/id_agentpool.go b/resource-manager/containerservice/2025-05-01/machines/id_agentpool.go new file mode 100644 index 
00000000000..591db14c283 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/id_agentpool.go @@ -0,0 +1,139 @@ +package machines + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&AgentPoolId{}) +} + +var _ resourceids.ResourceId = &AgentPoolId{} + +// AgentPoolId is a struct representing the Resource ID for a Agent Pool +type AgentPoolId struct { + SubscriptionId string + ResourceGroupName string + ManagedClusterName string + AgentPoolName string +} + +// NewAgentPoolID returns a new AgentPoolId struct +func NewAgentPoolID(subscriptionId string, resourceGroupName string, managedClusterName string, agentPoolName string) AgentPoolId { + return AgentPoolId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ManagedClusterName: managedClusterName, + AgentPoolName: agentPoolName, + } +} + +// ParseAgentPoolID parses 'input' into a AgentPoolId +func ParseAgentPoolID(input string) (*AgentPoolId, error) { + parser := resourceids.NewParserFromResourceIdType(&AgentPoolId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := AgentPoolId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseAgentPoolIDInsensitively parses 'input' case-insensitively into a AgentPoolId +// note: this method should only be used for API response data and not user input +func ParseAgentPoolIDInsensitively(input string) (*AgentPoolId, error) { + parser := resourceids.NewParserFromResourceIdType(&AgentPoolId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, 
fmt.Errorf("parsing %q: %+v", input, err) + } + + id := AgentPoolId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *AgentPoolId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ManagedClusterName, ok = input.Parsed["managedClusterName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "managedClusterName", input) + } + + if id.AgentPoolName, ok = input.Parsed["agentPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "agentPoolName", input) + } + + return nil +} + +// ValidateAgentPoolID checks that 'input' can be parsed as a Agent Pool ID +func ValidateAgentPoolID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseAgentPoolID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Agent Pool ID +func (id AgentPoolId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s/agentPools/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ManagedClusterName, id.AgentPoolName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Agent Pool ID +func (id AgentPoolId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + 
resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("managedClusterName", "managedClusterName"), + resourceids.StaticSegment("staticAgentPools", "agentPools", "agentPools"), + resourceids.UserSpecifiedSegment("agentPoolName", "agentPoolName"), + } +} + +// String returns a human-readable description of this Agent Pool ID +func (id AgentPoolId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Managed Cluster Name: %q", id.ManagedClusterName), + fmt.Sprintf("Agent Pool Name: %q", id.AgentPoolName), + } + return fmt.Sprintf("Agent Pool (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/containerservice/2025-05-01/machines/id_agentpool_test.go b/resource-manager/containerservice/2025-05-01/machines/id_agentpool_test.go new file mode 100644 index 00000000000..a8bfe45062b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/id_agentpool_test.go @@ -0,0 +1,327 @@ +package machines + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &AgentPoolId{} + +func TestNewAgentPoolID(t *testing.T) { + id := NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.ManagedClusterName != "managedClusterName" { + t.Fatalf("Expected %q but got %q for Segment 'ManagedClusterName'", id.ManagedClusterName, "managedClusterName") + } + + if id.AgentPoolName != "agentPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'AgentPoolName'", id.AgentPoolName, "agentPoolName") + } +} + +func TestFormatAgentPoolID(t *testing.T) { + actual := NewAgentPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseAgentPoolID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AgentPoolId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName", + Expected: &AgentPoolId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + AgentPoolName: "agentPoolName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseAgentPoolID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a 
value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.AgentPoolName != v.Expected.AgentPoolName { + t.Fatalf("Expected %q but got %q for AgentPoolName", v.Expected.AgentPoolName, actual.AgentPoolName) + } + + } +} + +func TestParseAgentPoolIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AgentPoolId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools", + Error: 
true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aGeNtPoOlS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName", + Expected: &AgentPoolId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + AgentPoolName: "agentPoolName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aGeNtPoOlS/aGeNtPoOlNaMe", + Expected: &AgentPoolId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + ManagedClusterName: "mAnAgEdClUsTeRnAmE", + AgentPoolName: "aGeNtPoOlNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aGeNtPoOlS/aGeNtPoOlNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseAgentPoolIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an 
error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.AgentPoolName != v.Expected.AgentPoolName { + t.Fatalf("Expected %q but got %q for AgentPoolName", v.Expected.AgentPoolName, actual.AgentPoolName) + } + + } +} + +func TestSegmentsForAgentPoolId(t *testing.T) { + segments := AgentPoolId{}.Segments() + if len(segments) == 0 { + t.Fatalf("AgentPoolId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/containerservice/2025-05-01/machines/id_machine.go b/resource-manager/containerservice/2025-05-01/machines/id_machine.go new file mode 100644 index 00000000000..3cf8b2ccbf0 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/id_machine.go @@ -0,0 +1,148 @@ +package machines + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&MachineId{}) +} + +var _ resourceids.ResourceId = &MachineId{} + +// MachineId is a struct representing the Resource ID for a Machine +type MachineId struct { + SubscriptionId string + ResourceGroupName string + ManagedClusterName string + AgentPoolName string + MachineName string +} + +// NewMachineID returns a new MachineId struct +func NewMachineID(subscriptionId string, resourceGroupName string, managedClusterName string, agentPoolName string, machineName string) MachineId { + return MachineId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ManagedClusterName: managedClusterName, + AgentPoolName: agentPoolName, + MachineName: machineName, + } +} + +// ParseMachineID parses 'input' into a MachineId +func ParseMachineID(input string) (*MachineId, error) { + parser := resourceids.NewParserFromResourceIdType(&MachineId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := MachineId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseMachineIDInsensitively parses 'input' case-insensitively into a MachineId +// note: this method should only be used for API response data and not user input +func ParseMachineIDInsensitively(input string) (*MachineId, error) { + parser := resourceids.NewParserFromResourceIdType(&MachineId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := MachineId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *MachineId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = 
input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ManagedClusterName, ok = input.Parsed["managedClusterName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "managedClusterName", input) + } + + if id.AgentPoolName, ok = input.Parsed["agentPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "agentPoolName", input) + } + + if id.MachineName, ok = input.Parsed["machineName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "machineName", input) + } + + return nil +} + +// ValidateMachineID checks that 'input' can be parsed as a Machine ID +func ValidateMachineID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseMachineID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Machine ID +func (id MachineId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s/agentPools/%s/machines/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ManagedClusterName, id.AgentPoolName, id.MachineName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Machine ID +func (id MachineId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + 
resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("managedClusterName", "managedClusterName"), + resourceids.StaticSegment("staticAgentPools", "agentPools", "agentPools"), + resourceids.UserSpecifiedSegment("agentPoolName", "agentPoolName"), + resourceids.StaticSegment("staticMachines", "machines", "machines"), + resourceids.UserSpecifiedSegment("machineName", "machineName"), + } +} + +// String returns a human-readable description of this Machine ID +func (id MachineId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Managed Cluster Name: %q", id.ManagedClusterName), + fmt.Sprintf("Agent Pool Name: %q", id.AgentPoolName), + fmt.Sprintf("Machine Name: %q", id.MachineName), + } + return fmt.Sprintf("Machine (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/containerservice/2025-05-01/machines/id_machine_test.go b/resource-manager/containerservice/2025-05-01/machines/id_machine_test.go new file mode 100644 index 00000000000..a94383704e7 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/id_machine_test.go @@ -0,0 +1,372 @@ +package machines + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &MachineId{} + +func TestNewMachineID(t *testing.T) { + id := NewMachineID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName", "machineName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.ManagedClusterName != "managedClusterName" { + t.Fatalf("Expected %q but got %q for Segment 'ManagedClusterName'", id.ManagedClusterName, "managedClusterName") + } + + if id.AgentPoolName != "agentPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'AgentPoolName'", id.AgentPoolName, "agentPoolName") + } + + if id.MachineName != "machineName" { + t.Fatalf("Expected %q but got %q for Segment 'MachineName'", id.MachineName, "machineName") + } +} + +func TestFormatMachineID(t *testing.T) { + actual := NewMachineID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "agentPoolName", "machineName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName/machines/machineName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseMachineID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *MachineId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName/machines", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName/machines/machineName", + Expected: &MachineId{ + SubscriptionId: 
"12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + AgentPoolName: "agentPoolName", + MachineName: "machineName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName/machines/machineName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseMachineID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.AgentPoolName != v.Expected.AgentPoolName { + t.Fatalf("Expected %q but got %q for AgentPoolName", v.Expected.AgentPoolName, actual.AgentPoolName) + } + + if actual.MachineName != v.Expected.MachineName { + t.Fatalf("Expected %q but got %q for MachineName", v.Expected.MachineName, actual.MachineName) + } + + } +} + +func TestParseMachineIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *MachineId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + 
Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aGeNtPoOlS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aGeNtPoOlS/aGeNtPoOlNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName/machines", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aGeNtPoOlS/aGeNtPoOlNaMe/mAcHiNeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName/machines/machineName", + Expected: &MachineId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + AgentPoolName: "agentPoolName", + MachineName: "machineName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/agentPools/agentPoolName/machines/machineName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aGeNtPoOlS/aGeNtPoOlNaMe/mAcHiNeS/mAcHiNeNaMe", + Expected: &MachineId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + ManagedClusterName: "mAnAgEdClUsTeRnAmE", + AgentPoolName: "aGeNtPoOlNaMe", + MachineName: "mAcHiNeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aGeNtPoOlS/aGeNtPoOlNaMe/mAcHiNeS/mAcHiNeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseMachineIDInsensitively(v.Input) + if err != 
nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.AgentPoolName != v.Expected.AgentPoolName { + t.Fatalf("Expected %q but got %q for AgentPoolName", v.Expected.AgentPoolName, actual.AgentPoolName) + } + + if actual.MachineName != v.Expected.MachineName { + t.Fatalf("Expected %q but got %q for MachineName", v.Expected.MachineName, actual.MachineName) + } + + } +} + +func TestSegmentsForMachineId(t *testing.T) { + segments := MachineId{}.Segments() + if len(segments) == 0 { + t.Fatalf("MachineId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/containerservice/2025-05-01/machines/method_get.go b/resource-manager/containerservice/2025-05-01/machines/method_get.go new file mode 100644 index 00000000000..14636e392c1 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/method_get.go @@ -0,0 +1,53 @@ +package machines + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *Machine +} + +// Get ... +func (c MachinesClient) Get(ctx context.Context, id MachineId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model Machine + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/machines/method_list.go b/resource-manager/containerservice/2025-05-01/machines/method_list.go new file mode 100644 index 00000000000..949d16fc5e3 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/method_list.go @@ -0,0 +1,105 @@ +package machines + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]Machine +} + +type ListCompleteResult struct { + LatestHttpResponse *http.Response + Items []Machine +} + +type ListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// List ... 
+func (c MachinesClient) List(ctx context.Context, id AgentPoolId) (result ListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListCustomPager{}, + Path: fmt.Sprintf("%s/machines", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]Machine `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListComplete retrieves all the results into a single object +func (c MachinesClient) ListComplete(ctx context.Context, id AgentPoolId) (ListCompleteResult, error) { + return c.ListCompleteMatchingPredicate(ctx, id, MachineOperationPredicate{}) +} + +// ListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c MachinesClient) ListCompleteMatchingPredicate(ctx context.Context, id AgentPoolId, predicate MachineOperationPredicate) (result ListCompleteResult, err error) { + items := make([]Machine, 0) + + resp, err := c.List(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/containerservice/2025-05-01/machines/model_machine.go b/resource-manager/containerservice/2025-05-01/machines/model_machine.go new file mode 100644 index 00000000000..f7d894181bd --- /dev/null +++ 
b/resource-manager/containerservice/2025-05-01/machines/model_machine.go @@ -0,0 +1,16 @@ +package machines + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/zones" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type Machine struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *MachineProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` + Zones *zones.Schema `json:"zones,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/machines/model_machineipaddress.go b/resource-manager/containerservice/2025-05-01/machines/model_machineipaddress.go new file mode 100644 index 00000000000..213cc86efcc --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/model_machineipaddress.go @@ -0,0 +1,9 @@ +package machines + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MachineIPAddress struct { + Family *IPFamily `json:"family,omitempty"` + IP *string `json:"ip,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/machines/model_machinenetworkproperties.go b/resource-manager/containerservice/2025-05-01/machines/model_machinenetworkproperties.go new file mode 100644 index 00000000000..5183ee2722f --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/model_machinenetworkproperties.go @@ -0,0 +1,8 @@ +package machines + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MachineNetworkProperties struct { + IPAddresses *[]MachineIPAddress `json:"ipAddresses,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/machines/model_machineproperties.go b/resource-manager/containerservice/2025-05-01/machines/model_machineproperties.go new file mode 100644 index 00000000000..041457c0e7c --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/model_machineproperties.go @@ -0,0 +1,9 @@ +package machines + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MachineProperties struct { + Network *MachineNetworkProperties `json:"network,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/machines/predicates.go b/resource-manager/containerservice/2025-05-01/machines/predicates.go new file mode 100644 index 00000000000..ebc0babfdc2 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/predicates.go @@ -0,0 +1,27 @@ +package machines + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MachineOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p MachineOperationPredicate) Matches(input Machine) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/containerservice/2025-05-01/machines/version.go b/resource-manager/containerservice/2025-05-01/machines/version.go new file mode 100644 index 00000000000..775a43eda2b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/machines/version.go @@ -0,0 +1,10 @@ +package machines + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-05-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/machines/2025-05-01" +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/README.md b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/README.md new file mode 100644 index 00000000000..10427ea78ce --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/README.md @@ -0,0 +1,91 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/maintenanceconfigurations` Documentation + +The `maintenanceconfigurations` SDK allows for interaction with Azure Resource Manager `containerservice` (API Version `2025-05-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/maintenanceconfigurations" +``` + + +### Client Initialization + +```go +client := maintenanceconfigurations.NewMaintenanceConfigurationsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `MaintenanceConfigurationsClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := maintenanceconfigurations.NewMaintenanceConfigurationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "maintenanceConfigurationName") + +payload := maintenanceconfigurations.MaintenanceConfiguration{ + // ... +} + + +read, err := client.CreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `MaintenanceConfigurationsClient.Delete` + +```go +ctx := context.TODO() +id := maintenanceconfigurations.NewMaintenanceConfigurationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "maintenanceConfigurationName") + +read, err := client.Delete(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `MaintenanceConfigurationsClient.Get` + +```go +ctx := context.TODO() +id := maintenanceconfigurations.NewMaintenanceConfigurationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "maintenanceConfigurationName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `MaintenanceConfigurationsClient.ListByManagedCluster` + +```go +ctx := 
context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +// alternatively `client.ListByManagedCluster(ctx, id)` can be used to do batched pagination +items, err := client.ListByManagedClusterComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/client.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/client.go new file mode 100644 index 00000000000..bcb48de3779 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/client.go @@ -0,0 +1,26 @@ +package maintenanceconfigurations + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MaintenanceConfigurationsClient struct { + Client *resourcemanager.Client +} + +func NewMaintenanceConfigurationsClientWithBaseURI(sdkApi sdkEnv.Api) (*MaintenanceConfigurationsClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "maintenanceconfigurations", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating MaintenanceConfigurationsClient: %+v", err) + } + + return &MaintenanceConfigurationsClient{ + Client: client, + }, nil +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/constants.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/constants.go new file mode 100644 index 00000000000..2fdd49e9763 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/constants.go @@ -0,0 +1,116 @@ +package maintenanceconfigurations + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type Type string + +const ( + TypeFirst Type = "First" + TypeFourth Type = "Fourth" + TypeLast Type = "Last" + TypeSecond Type = "Second" + TypeThird Type = "Third" +) + +func PossibleValuesForType() []string { + return []string{ + string(TypeFirst), + string(TypeFourth), + string(TypeLast), + string(TypeSecond), + string(TypeThird), + } +} + +func (s *Type) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseType(input string) (*Type, error) { + vals := map[string]Type{ + "first": TypeFirst, + "fourth": TypeFourth, + "last": TypeLast, + "second": TypeSecond, + "third": TypeThird, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Type(input) + return &out, nil +} + +type WeekDay string + +const ( + WeekDayFriday WeekDay = "Friday" + WeekDayMonday WeekDay = "Monday" + WeekDaySaturday WeekDay = "Saturday" + WeekDaySunday WeekDay = "Sunday" + WeekDayThursday WeekDay = "Thursday" + WeekDayTuesday WeekDay = "Tuesday" + WeekDayWednesday WeekDay = "Wednesday" +) + +func PossibleValuesForWeekDay() []string { + return []string{ + string(WeekDayFriday), + string(WeekDayMonday), + string(WeekDaySaturday), + string(WeekDaySunday), + string(WeekDayThursday), + string(WeekDayTuesday), + string(WeekDayWednesday), + } +} + +func (s *WeekDay) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseWeekDay(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseWeekDay(input string) (*WeekDay, error) { + vals := 
map[string]WeekDay{ + "friday": WeekDayFriday, + "monday": WeekDayMonday, + "saturday": WeekDaySaturday, + "sunday": WeekDaySunday, + "thursday": WeekDayThursday, + "tuesday": WeekDayTuesday, + "wednesday": WeekDayWednesday, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := WeekDay(input) + return &out, nil +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/id_maintenanceconfiguration.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/id_maintenanceconfiguration.go new file mode 100644 index 00000000000..a06e5519459 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/id_maintenanceconfiguration.go @@ -0,0 +1,139 @@ +package maintenanceconfigurations + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&MaintenanceConfigurationId{}) +} + +var _ resourceids.ResourceId = &MaintenanceConfigurationId{} + +// MaintenanceConfigurationId is a struct representing the Resource ID for a Maintenance Configuration +type MaintenanceConfigurationId struct { + SubscriptionId string + ResourceGroupName string + ManagedClusterName string + MaintenanceConfigurationName string +} + +// NewMaintenanceConfigurationID returns a new MaintenanceConfigurationId struct +func NewMaintenanceConfigurationID(subscriptionId string, resourceGroupName string, managedClusterName string, maintenanceConfigurationName string) MaintenanceConfigurationId { + return MaintenanceConfigurationId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ManagedClusterName: managedClusterName, + MaintenanceConfigurationName: maintenanceConfigurationName, + } +} + +// ParseMaintenanceConfigurationID parses 'input' into a MaintenanceConfigurationId +func ParseMaintenanceConfigurationID(input string) (*MaintenanceConfigurationId, error) { + parser := resourceids.NewParserFromResourceIdType(&MaintenanceConfigurationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := MaintenanceConfigurationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseMaintenanceConfigurationIDInsensitively parses 'input' case-insensitively into a MaintenanceConfigurationId +// note: this method should only be used for API response data and not user input +func ParseMaintenanceConfigurationIDInsensitively(input string) (*MaintenanceConfigurationId, error) { + parser := resourceids.NewParserFromResourceIdType(&MaintenanceConfigurationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := MaintenanceConfigurationId{} + if err = 
id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *MaintenanceConfigurationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ManagedClusterName, ok = input.Parsed["managedClusterName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "managedClusterName", input) + } + + if id.MaintenanceConfigurationName, ok = input.Parsed["maintenanceConfigurationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "maintenanceConfigurationName", input) + } + + return nil +} + +// ValidateMaintenanceConfigurationID checks that 'input' can be parsed as a Maintenance Configuration ID +func ValidateMaintenanceConfigurationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseMaintenanceConfigurationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Maintenance Configuration ID +func (id MaintenanceConfigurationId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s/maintenanceConfigurations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ManagedClusterName, id.MaintenanceConfigurationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Maintenance Configuration ID +func (id MaintenanceConfigurationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", 
"subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("managedClusterName", "managedClusterName"), + resourceids.StaticSegment("staticMaintenanceConfigurations", "maintenanceConfigurations", "maintenanceConfigurations"), + resourceids.UserSpecifiedSegment("maintenanceConfigurationName", "maintenanceConfigurationName"), + } +} + +// String returns a human-readable description of this Maintenance Configuration ID +func (id MaintenanceConfigurationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Managed Cluster Name: %q", id.ManagedClusterName), + fmt.Sprintf("Maintenance Configuration Name: %q", id.MaintenanceConfigurationName), + } + return fmt.Sprintf("Maintenance Configuration (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/id_maintenanceconfiguration_test.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/id_maintenanceconfiguration_test.go new file mode 100644 index 00000000000..d5d0ad17780 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/id_maintenanceconfiguration_test.go @@ -0,0 +1,327 @@ +package maintenanceconfigurations + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &MaintenanceConfigurationId{} + +func TestNewMaintenanceConfigurationID(t *testing.T) { + id := NewMaintenanceConfigurationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "maintenanceConfigurationName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.ManagedClusterName != "managedClusterName" { + t.Fatalf("Expected %q but got %q for Segment 'ManagedClusterName'", id.ManagedClusterName, "managedClusterName") + } + + if id.MaintenanceConfigurationName != "maintenanceConfigurationName" { + t.Fatalf("Expected %q but got %q for Segment 'MaintenanceConfigurationName'", id.MaintenanceConfigurationName, "maintenanceConfigurationName") + } +} + +func TestFormatMaintenanceConfigurationID(t *testing.T) { + actual := NewMaintenanceConfigurationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "maintenanceConfigurationName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/maintenanceConfigurations/maintenanceConfigurationName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseMaintenanceConfigurationID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *MaintenanceConfigurationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + 
{ + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/maintenanceConfigurations", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/maintenanceConfigurations/maintenanceConfigurationName", + Expected: &MaintenanceConfigurationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + MaintenanceConfigurationName: "maintenanceConfigurationName", + }, + }, + { + // Invalid (Valid Uri with Extra 
segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/maintenanceConfigurations/maintenanceConfigurationName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseMaintenanceConfigurationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.MaintenanceConfigurationName != v.Expected.MaintenanceConfigurationName { + t.Fatalf("Expected %q but got %q for MaintenanceConfigurationName", v.Expected.MaintenanceConfigurationName, actual.MaintenanceConfigurationName) + } + + } +} + +func TestParseMaintenanceConfigurationIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *MaintenanceConfigurationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/maintenanceConfigurations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/mAiNtEnAnCeCoNfIgUrAtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/maintenanceConfigurations/maintenanceConfigurationName", + Expected: &MaintenanceConfigurationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + MaintenanceConfigurationName: "maintenanceConfigurationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/maintenanceConfigurations/maintenanceConfigurationName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/mAiNtEnAnCeCoNfIgUrAtIoNs/mAiNtEnAnCeCoNfIgUrAtIoNnAmE", + Expected: &MaintenanceConfigurationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + ManagedClusterName: "mAnAgEdClUsTeRnAmE", + MaintenanceConfigurationName: "mAiNtEnAnCeCoNfIgUrAtIoNnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/mAiNtEnAnCeCoNfIgUrAtIoNs/mAiNtEnAnCeCoNfIgUrAtIoNnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseMaintenanceConfigurationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.MaintenanceConfigurationName != v.Expected.MaintenanceConfigurationName { + t.Fatalf("Expected %q but got %q for MaintenanceConfigurationName", v.Expected.MaintenanceConfigurationName, actual.MaintenanceConfigurationName) + } + + } +} + +func TestSegmentsForMaintenanceConfigurationId(t 
*testing.T) { + segments := MaintenanceConfigurationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("MaintenanceConfigurationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/method_createorupdate.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/method_createorupdate.go new file mode 100644 index 00000000000..a6e8fc75722 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/method_createorupdate.go @@ -0,0 +1,58 @@ +package maintenanceconfigurations + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *MaintenanceConfiguration +} + +// CreateOrUpdate ... 
+func (c MaintenanceConfigurationsClient) CreateOrUpdate(ctx context.Context, id MaintenanceConfigurationId, input MaintenanceConfiguration) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model MaintenanceConfiguration + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/method_delete.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/method_delete.go new file mode 100644 index 00000000000..5993be4c15a --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/method_delete.go @@ -0,0 +1,47 @@ +package maintenanceconfigurations + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... 
+func (c MaintenanceConfigurationsClient) Delete(ctx context.Context, id MaintenanceConfigurationId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/method_get.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/method_get.go new file mode 100644 index 00000000000..3989652ca96 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/method_get.go @@ -0,0 +1,53 @@ +package maintenanceconfigurations + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *MaintenanceConfiguration +} + +// Get ... 
+func (c MaintenanceConfigurationsClient) Get(ctx context.Context, id MaintenanceConfigurationId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model MaintenanceConfiguration + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/method_listbymanagedcluster.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/method_listbymanagedcluster.go new file mode 100644 index 00000000000..f87e9032ed5 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/method_listbymanagedcluster.go @@ -0,0 +1,106 @@ +package maintenanceconfigurations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListByManagedClusterOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]MaintenanceConfiguration +} + +type ListByManagedClusterCompleteResult struct { + LatestHttpResponse *http.Response + Items []MaintenanceConfiguration +} + +type ListByManagedClusterCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByManagedClusterCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByManagedCluster ... +func (c MaintenanceConfigurationsClient) ListByManagedCluster(ctx context.Context, id commonids.KubernetesClusterId) (result ListByManagedClusterOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByManagedClusterCustomPager{}, + Path: fmt.Sprintf("%s/maintenanceConfigurations", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]MaintenanceConfiguration `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByManagedClusterComplete retrieves all the results into a single object +func (c MaintenanceConfigurationsClient) ListByManagedClusterComplete(ctx context.Context, id commonids.KubernetesClusterId) (ListByManagedClusterCompleteResult, error) { + return c.ListByManagedClusterCompleteMatchingPredicate(ctx, id, MaintenanceConfigurationOperationPredicate{}) +} + +// ListByManagedClusterCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c MaintenanceConfigurationsClient) 
ListByManagedClusterCompleteMatchingPredicate(ctx context.Context, id commonids.KubernetesClusterId, predicate MaintenanceConfigurationOperationPredicate) (result ListByManagedClusterCompleteResult, err error) { + items := make([]MaintenanceConfiguration, 0) + + resp, err := c.ListByManagedCluster(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByManagedClusterCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_absolutemonthlyschedule.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_absolutemonthlyschedule.go new file mode 100644 index 00000000000..ca6966896ff --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_absolutemonthlyschedule.go @@ -0,0 +1,9 @@ +package maintenanceconfigurations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AbsoluteMonthlySchedule struct { + DayOfMonth int64 `json:"dayOfMonth"` + IntervalMonths int64 `json:"intervalMonths"` +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_dailyschedule.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_dailyschedule.go new file mode 100644 index 00000000000..57fb3e994dd --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_dailyschedule.go @@ -0,0 +1,8 @@ +package maintenanceconfigurations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type DailySchedule struct { + IntervalDays int64 `json:"intervalDays"` +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_datespan.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_datespan.go new file mode 100644 index 00000000000..863c4704427 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_datespan.go @@ -0,0 +1,9 @@ +package maintenanceconfigurations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DateSpan struct { + End string `json:"end"` + Start string `json:"start"` +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_maintenanceconfiguration.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_maintenanceconfiguration.go new file mode 100644 index 00000000000..9066e53bff3 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_maintenanceconfiguration.go @@ -0,0 +1,16 @@ +package maintenanceconfigurations + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MaintenanceConfiguration struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *MaintenanceConfigurationProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_maintenanceconfigurationproperties.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_maintenanceconfigurationproperties.go new file mode 100644 index 00000000000..d7c91bcf9f6 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_maintenanceconfigurationproperties.go @@ -0,0 +1,10 @@ +package maintenanceconfigurations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MaintenanceConfigurationProperties struct { + MaintenanceWindow *MaintenanceWindow `json:"maintenanceWindow,omitempty"` + NotAllowedTime *[]TimeSpan `json:"notAllowedTime,omitempty"` + TimeInWeek *[]TimeInWeek `json:"timeInWeek,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_maintenancewindow.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_maintenancewindow.go new file mode 100644 index 00000000000..8f1a42f818c --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_maintenancewindow.go @@ -0,0 +1,13 @@ +package maintenanceconfigurations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MaintenanceWindow struct { + DurationHours int64 `json:"durationHours"` + NotAllowedDates *[]DateSpan `json:"notAllowedDates,omitempty"` + Schedule Schedule `json:"schedule"` + StartDate *string `json:"startDate,omitempty"` + StartTime string `json:"startTime"` + UtcOffset *string `json:"utcOffset,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_relativemonthlyschedule.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_relativemonthlyschedule.go new file mode 100644 index 00000000000..0dfc104da98 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_relativemonthlyschedule.go @@ -0,0 +1,10 @@ +package maintenanceconfigurations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RelativeMonthlySchedule struct { + DayOfWeek WeekDay `json:"dayOfWeek"` + IntervalMonths int64 `json:"intervalMonths"` + WeekIndex Type `json:"weekIndex"` +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_schedule.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_schedule.go new file mode 100644 index 00000000000..cc4397aba4c --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_schedule.go @@ -0,0 +1,11 @@ +package maintenanceconfigurations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type Schedule struct { + AbsoluteMonthly *AbsoluteMonthlySchedule `json:"absoluteMonthly,omitempty"` + Daily *DailySchedule `json:"daily,omitempty"` + RelativeMonthly *RelativeMonthlySchedule `json:"relativeMonthly,omitempty"` + Weekly *WeeklySchedule `json:"weekly,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_timeinweek.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_timeinweek.go new file mode 100644 index 00000000000..cba259f76fc --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_timeinweek.go @@ -0,0 +1,9 @@ +package maintenanceconfigurations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TimeInWeek struct { + Day *WeekDay `json:"day,omitempty"` + HourSlots *[]int64 `json:"hourSlots,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_timespan.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_timespan.go new file mode 100644 index 00000000000..7bbc8c5ba72 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_timespan.go @@ -0,0 +1,39 @@ +package maintenanceconfigurations + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type TimeSpan struct { + End *string `json:"end,omitempty"` + Start *string `json:"start,omitempty"` +} + +func (o *TimeSpan) GetEndAsTime() (*time.Time, error) { + if o.End == nil { + return nil, nil + } + return dates.ParseAsFormat(o.End, "2006-01-02T15:04:05Z07:00") +} + +func (o *TimeSpan) SetEndAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.End = &formatted +} + +func (o *TimeSpan) GetStartAsTime() (*time.Time, error) { + if o.Start == nil { + return nil, nil + } + return dates.ParseAsFormat(o.Start, "2006-01-02T15:04:05Z07:00") +} + +func (o *TimeSpan) SetStartAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.Start = &formatted +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_weeklyschedule.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_weeklyschedule.go new file mode 100644 index 00000000000..ed775ad784e --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/model_weeklyschedule.go @@ -0,0 +1,9 @@ +package maintenanceconfigurations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type WeeklySchedule struct { + DayOfWeek WeekDay `json:"dayOfWeek"` + IntervalWeeks int64 `json:"intervalWeeks"` +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/predicates.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/predicates.go new file mode 100644 index 00000000000..c7eedb4fa7b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/predicates.go @@ -0,0 +1,27 @@ +package maintenanceconfigurations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MaintenanceConfigurationOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p MaintenanceConfigurationOperationPredicate) Matches(input MaintenanceConfiguration) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/version.go b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/version.go new file mode 100644 index 00000000000..299d182c64f --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/maintenanceconfigurations/version.go @@ -0,0 +1,10 @@ +package maintenanceconfigurations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-05-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/maintenanceconfigurations/2025-05-01" +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/README.md b/resource-manager/containerservice/2025-05-01/managedclusters/README.md new file mode 100644 index 00000000000..8846f3bfffb --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/README.md @@ -0,0 +1,423 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/managedclusters` Documentation + +The `managedclusters` SDK allows for interaction with Azure Resource Manager `containerservice` (API Version `2025-05-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/managedclusters" +``` + + +### Client Initialization + +```go +client := managedclusters.NewManagedClustersClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `ManagedClustersClient.AbortLatestOperation` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +if err := client.AbortLatestOperationThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +payload := managedclusters.ManagedCluster{ + // ... +} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload, managedclusters.DefaultCreateOrUpdateOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.Delete` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +if err := client.DeleteThenPoll(ctx, id, managedclusters.DefaultDeleteOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.Get` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.GetAccessProfile` + +```go +ctx := 
context.TODO() +id := managedclusters.NewAccessProfileID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "accessProfileName") + +read, err := client.GetAccessProfile(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.GetCommandResult` + +```go +ctx := context.TODO() +id := managedclusters.NewCommandResultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "commandId") + +read, err := client.GetCommandResult(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.GetMeshRevisionProfile` + +```go +ctx := context.TODO() +id := managedclusters.NewMeshRevisionProfileID("12345678-1234-9876-4563-123456789012", "locationName", "meshRevisionProfileName") + +read, err := client.GetMeshRevisionProfile(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.GetMeshUpgradeProfile` + +```go +ctx := context.TODO() +id := managedclusters.NewMeshUpgradeProfileID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "meshUpgradeProfileName") + +read, err := client.GetMeshUpgradeProfile(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.GetUpgradeProfile` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +read, err := client.GetUpgradeProfile(ctx, id) +if err != nil { + // handle the error +} +if model := 
read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.List` + +```go +ctx := context.TODO() +id := commonids.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.List(ctx, id)` can be used to do batched pagination +items, err := client.ListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ManagedClustersClient.ListByResourceGroup` + +```go +ctx := context.TODO() +id := commonids.NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + +// alternatively `client.ListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ManagedClustersClient.ListClusterAdminCredentials` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +read, err := client.ListClusterAdminCredentials(ctx, id, managedclusters.DefaultListClusterAdminCredentialsOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.ListClusterMonitoringUserCredentials` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +read, err := client.ListClusterMonitoringUserCredentials(ctx, id, managedclusters.DefaultListClusterMonitoringUserCredentialsOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: 
`ManagedClustersClient.ListClusterUserCredentials` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +read, err := client.ListClusterUserCredentials(ctx, id, managedclusters.DefaultListClusterUserCredentialsOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.ListKubernetesVersions` + +```go +ctx := context.TODO() +id := managedclusters.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +read, err := client.ListKubernetesVersions(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ManagedClustersClient.ListMeshRevisionProfiles` + +```go +ctx := context.TODO() +id := managedclusters.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +// alternatively `client.ListMeshRevisionProfiles(ctx, id)` can be used to do batched pagination +items, err := client.ListMeshRevisionProfilesComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ManagedClustersClient.ListMeshUpgradeProfiles` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +// alternatively `client.ListMeshUpgradeProfiles(ctx, id)` can be used to do batched pagination +items, err := client.ListMeshUpgradeProfilesComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ManagedClustersClient.ListOutboundNetworkDependenciesEndpoints` + +```go +ctx := context.TODO() +id := 
commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +// alternatively `client.ListOutboundNetworkDependenciesEndpoints(ctx, id)` can be used to do batched pagination +items, err := client.ListOutboundNetworkDependenciesEndpointsComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ManagedClustersClient.ResetAADProfile` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +payload := managedclusters.ManagedClusterAADProfile{ + // ... +} + + +if err := client.ResetAADProfileThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.ResetServicePrincipalProfile` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +payload := managedclusters.ManagedClusterServicePrincipalProfile{ + // ... 
+} + + +if err := client.ResetServicePrincipalProfileThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.RotateClusterCertificates` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +if err := client.RotateClusterCertificatesThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.RotateServiceAccountSigningKeys` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +if err := client.RotateServiceAccountSigningKeysThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.RunCommand` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +payload := managedclusters.RunCommandRequest{ + // ... 
+} + + +if err := client.RunCommandThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.Start` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +if err := client.StartThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.Stop` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +if err := client.StopThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `ManagedClustersClient.UpdateTags` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +payload := managedclusters.TagsObject{ + // ... +} + + +if err := client.UpdateTagsThenPoll(ctx, id, payload, managedclusters.DefaultUpdateTagsOperationOptions()); err != nil { + // handle the error +} +``` diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/client.go b/resource-manager/containerservice/2025-05-01/managedclusters/client.go new file mode 100644 index 00000000000..a2037a7e28b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/client.go @@ -0,0 +1,26 @@ +package managedclusters + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClustersClient struct { + Client *resourcemanager.Client +} + +func NewManagedClustersClientWithBaseURI(sdkApi sdkEnv.Api) (*ManagedClustersClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "managedclusters", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating ManagedClustersClient: %+v", err) + } + + return &ManagedClustersClient{ + Client: client, + }, nil +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/constants.go b/resource-manager/containerservice/2025-05-01/managedclusters/constants.go new file mode 100644 index 00000000000..df31ab1aff8 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/constants.go @@ -0,0 +1,1854 @@ +package managedclusters + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AgentPoolMode string + +const ( + AgentPoolModeGateway AgentPoolMode = "Gateway" + AgentPoolModeSystem AgentPoolMode = "System" + AgentPoolModeUser AgentPoolMode = "User" +) + +func PossibleValuesForAgentPoolMode() []string { + return []string{ + string(AgentPoolModeGateway), + string(AgentPoolModeSystem), + string(AgentPoolModeUser), + } +} + +func (s *AgentPoolMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAgentPoolMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAgentPoolMode(input string) (*AgentPoolMode, error) { + vals := map[string]AgentPoolMode{ + "gateway": AgentPoolModeGateway, + "system": AgentPoolModeSystem, + "user": AgentPoolModeUser, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value 
and best-effort it + out := AgentPoolMode(input) + return &out, nil +} + +type AgentPoolType string + +const ( + AgentPoolTypeAvailabilitySet AgentPoolType = "AvailabilitySet" + AgentPoolTypeVirtualMachineScaleSets AgentPoolType = "VirtualMachineScaleSets" + AgentPoolTypeVirtualMachines AgentPoolType = "VirtualMachines" +) + +func PossibleValuesForAgentPoolType() []string { + return []string{ + string(AgentPoolTypeAvailabilitySet), + string(AgentPoolTypeVirtualMachineScaleSets), + string(AgentPoolTypeVirtualMachines), + } +} + +func (s *AgentPoolType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAgentPoolType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAgentPoolType(input string) (*AgentPoolType, error) { + vals := map[string]AgentPoolType{ + "availabilityset": AgentPoolTypeAvailabilitySet, + "virtualmachinescalesets": AgentPoolTypeVirtualMachineScaleSets, + "virtualmachines": AgentPoolTypeVirtualMachines, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AgentPoolType(input) + return &out, nil +} + +type ArtifactSource string + +const ( + ArtifactSourceCache ArtifactSource = "Cache" + ArtifactSourceDirect ArtifactSource = "Direct" +) + +func PossibleValuesForArtifactSource() []string { + return []string{ + string(ArtifactSourceCache), + string(ArtifactSourceDirect), + } +} + +func (s *ArtifactSource) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseArtifactSource(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseArtifactSource(input string) 
(*ArtifactSource, error) { + vals := map[string]ArtifactSource{ + "cache": ArtifactSourceCache, + "direct": ArtifactSourceDirect, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ArtifactSource(input) + return &out, nil +} + +type BackendPoolType string + +const ( + BackendPoolTypeNodeIP BackendPoolType = "NodeIP" + BackendPoolTypeNodeIPConfiguration BackendPoolType = "NodeIPConfiguration" +) + +func PossibleValuesForBackendPoolType() []string { + return []string{ + string(BackendPoolTypeNodeIP), + string(BackendPoolTypeNodeIPConfiguration), + } +} + +func (s *BackendPoolType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackendPoolType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackendPoolType(input string) (*BackendPoolType, error) { + vals := map[string]BackendPoolType{ + "nodeip": BackendPoolTypeNodeIP, + "nodeipconfiguration": BackendPoolTypeNodeIPConfiguration, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackendPoolType(input) + return &out, nil +} + +type Code string + +const ( + CodeRunning Code = "Running" + CodeStopped Code = "Stopped" +) + +func PossibleValuesForCode() []string { + return []string{ + string(CodeRunning), + string(CodeStopped), + } +} + +func (s *Code) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCode(input string) (*Code, error) { + vals := 
map[string]Code{ + "running": CodeRunning, + "stopped": CodeStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Code(input) + return &out, nil +} + +type Expander string + +const ( + ExpanderLeastNegativewaste Expander = "least-waste" + ExpanderMostNegativepods Expander = "most-pods" + ExpanderPriority Expander = "priority" + ExpanderRandom Expander = "random" +) + +func PossibleValuesForExpander() []string { + return []string{ + string(ExpanderLeastNegativewaste), + string(ExpanderMostNegativepods), + string(ExpanderPriority), + string(ExpanderRandom), + } +} + +func (s *Expander) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseExpander(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseExpander(input string) (*Expander, error) { + vals := map[string]Expander{ + "least-waste": ExpanderLeastNegativewaste, + "most-pods": ExpanderMostNegativepods, + "priority": ExpanderPriority, + "random": ExpanderRandom, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Expander(input) + return &out, nil +} + +type Format string + +const ( + FormatAzure Format = "azure" + FormatExec Format = "exec" +) + +func PossibleValuesForFormat() []string { + return []string{ + string(FormatAzure), + string(FormatExec), + } +} + +func (s *Format) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseFormat(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseFormat(input string) 
(*Format, error) { + vals := map[string]Format{ + "azure": FormatAzure, + "exec": FormatExec, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Format(input) + return &out, nil +} + +type GPUDriver string + +const ( + GPUDriverInstall GPUDriver = "Install" + GPUDriverNone GPUDriver = "None" +) + +func PossibleValuesForGPUDriver() []string { + return []string{ + string(GPUDriverInstall), + string(GPUDriverNone), + } +} + +func (s *GPUDriver) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseGPUDriver(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseGPUDriver(input string) (*GPUDriver, error) { + vals := map[string]GPUDriver{ + "install": GPUDriverInstall, + "none": GPUDriverNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := GPUDriver(input) + return &out, nil +} + +type GPUInstanceProfile string + +const ( + GPUInstanceProfileMIGFourg GPUInstanceProfile = "MIG4g" + GPUInstanceProfileMIGOneg GPUInstanceProfile = "MIG1g" + GPUInstanceProfileMIGSeveng GPUInstanceProfile = "MIG7g" + GPUInstanceProfileMIGThreeg GPUInstanceProfile = "MIG3g" + GPUInstanceProfileMIGTwog GPUInstanceProfile = "MIG2g" +) + +func PossibleValuesForGPUInstanceProfile() []string { + return []string{ + string(GPUInstanceProfileMIGFourg), + string(GPUInstanceProfileMIGOneg), + string(GPUInstanceProfileMIGSeveng), + string(GPUInstanceProfileMIGThreeg), + string(GPUInstanceProfileMIGTwog), + } +} + +func (s *GPUInstanceProfile) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + 
out, err := parseGPUInstanceProfile(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseGPUInstanceProfile(input string) (*GPUInstanceProfile, error) { + vals := map[string]GPUInstanceProfile{ + "mig4g": GPUInstanceProfileMIGFourg, + "mig1g": GPUInstanceProfileMIGOneg, + "mig7g": GPUInstanceProfileMIGSeveng, + "mig3g": GPUInstanceProfileMIGThreeg, + "mig2g": GPUInstanceProfileMIGTwog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := GPUInstanceProfile(input) + return &out, nil +} + +type IPFamily string + +const ( + IPFamilyIPvFour IPFamily = "IPv4" + IPFamilyIPvSix IPFamily = "IPv6" +) + +func PossibleValuesForIPFamily() []string { + return []string{ + string(IPFamilyIPvFour), + string(IPFamilyIPvSix), + } +} + +func (s *IPFamily) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseIPFamily(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseIPFamily(input string) (*IPFamily, error) { + vals := map[string]IPFamily{ + "ipv4": IPFamilyIPvFour, + "ipv6": IPFamilyIPvSix, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := IPFamily(input) + return &out, nil +} + +type IstioIngressGatewayMode string + +const ( + IstioIngressGatewayModeExternal IstioIngressGatewayMode = "External" + IstioIngressGatewayModeInternal IstioIngressGatewayMode = "Internal" +) + +func PossibleValuesForIstioIngressGatewayMode() []string { + return []string{ + string(IstioIngressGatewayModeExternal), + string(IstioIngressGatewayModeInternal), + } +} + +func (s *IstioIngressGatewayMode) UnmarshalJSON(bytes []byte) error { + var 
decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseIstioIngressGatewayMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseIstioIngressGatewayMode(input string) (*IstioIngressGatewayMode, error) { + vals := map[string]IstioIngressGatewayMode{ + "external": IstioIngressGatewayModeExternal, + "internal": IstioIngressGatewayModeInternal, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := IstioIngressGatewayMode(input) + return &out, nil +} + +type KeyVaultNetworkAccessTypes string + +const ( + KeyVaultNetworkAccessTypesPrivate KeyVaultNetworkAccessTypes = "Private" + KeyVaultNetworkAccessTypesPublic KeyVaultNetworkAccessTypes = "Public" +) + +func PossibleValuesForKeyVaultNetworkAccessTypes() []string { + return []string{ + string(KeyVaultNetworkAccessTypesPrivate), + string(KeyVaultNetworkAccessTypesPublic), + } +} + +func (s *KeyVaultNetworkAccessTypes) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseKeyVaultNetworkAccessTypes(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseKeyVaultNetworkAccessTypes(input string) (*KeyVaultNetworkAccessTypes, error) { + vals := map[string]KeyVaultNetworkAccessTypes{ + "private": KeyVaultNetworkAccessTypesPrivate, + "public": KeyVaultNetworkAccessTypesPublic, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KeyVaultNetworkAccessTypes(input) + return &out, nil +} + +type KubeletDiskType string + +const ( + KubeletDiskTypeOS KubeletDiskType = "OS" + 
KubeletDiskTypeTemporary KubeletDiskType = "Temporary" +) + +func PossibleValuesForKubeletDiskType() []string { + return []string{ + string(KubeletDiskTypeOS), + string(KubeletDiskTypeTemporary), + } +} + +func (s *KubeletDiskType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseKubeletDiskType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseKubeletDiskType(input string) (*KubeletDiskType, error) { + vals := map[string]KubeletDiskType{ + "os": KubeletDiskTypeOS, + "temporary": KubeletDiskTypeTemporary, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KubeletDiskType(input) + return &out, nil +} + +type KubernetesSupportPlan string + +const ( + KubernetesSupportPlanAKSLongTermSupport KubernetesSupportPlan = "AKSLongTermSupport" + KubernetesSupportPlanKubernetesOfficial KubernetesSupportPlan = "KubernetesOfficial" +) + +func PossibleValuesForKubernetesSupportPlan() []string { + return []string{ + string(KubernetesSupportPlanAKSLongTermSupport), + string(KubernetesSupportPlanKubernetesOfficial), + } +} + +func (s *KubernetesSupportPlan) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseKubernetesSupportPlan(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseKubernetesSupportPlan(input string) (*KubernetesSupportPlan, error) { + vals := map[string]KubernetesSupportPlan{ + "akslongtermsupport": KubernetesSupportPlanAKSLongTermSupport, + "kubernetesofficial": KubernetesSupportPlanKubernetesOfficial, + } + if v, ok := vals[strings.ToLower(input)]; ok { + 
return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KubernetesSupportPlan(input) + return &out, nil +} + +type LicenseType string + +const ( + LicenseTypeNone LicenseType = "None" + LicenseTypeWindowsServer LicenseType = "Windows_Server" +) + +func PossibleValuesForLicenseType() []string { + return []string{ + string(LicenseTypeNone), + string(LicenseTypeWindowsServer), + } +} + +func (s *LicenseType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLicenseType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLicenseType(input string) (*LicenseType, error) { + vals := map[string]LicenseType{ + "none": LicenseTypeNone, + "windows_server": LicenseTypeWindowsServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LicenseType(input) + return &out, nil +} + +type LoadBalancerSku string + +const ( + LoadBalancerSkuBasic LoadBalancerSku = "basic" + LoadBalancerSkuStandard LoadBalancerSku = "standard" +) + +func PossibleValuesForLoadBalancerSku() []string { + return []string{ + string(LoadBalancerSkuBasic), + string(LoadBalancerSkuStandard), + } +} + +func (s *LoadBalancerSku) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLoadBalancerSku(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLoadBalancerSku(input string) (*LoadBalancerSku, error) { + vals := map[string]LoadBalancerSku{ + "basic": LoadBalancerSkuBasic, + "standard": LoadBalancerSkuStandard, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return 
&v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LoadBalancerSku(input) + return &out, nil +} + +type ManagedClusterPodIdentityProvisioningState string + +const ( + ManagedClusterPodIdentityProvisioningStateAssigned ManagedClusterPodIdentityProvisioningState = "Assigned" + ManagedClusterPodIdentityProvisioningStateCanceled ManagedClusterPodIdentityProvisioningState = "Canceled" + ManagedClusterPodIdentityProvisioningStateDeleting ManagedClusterPodIdentityProvisioningState = "Deleting" + ManagedClusterPodIdentityProvisioningStateFailed ManagedClusterPodIdentityProvisioningState = "Failed" + ManagedClusterPodIdentityProvisioningStateSucceeded ManagedClusterPodIdentityProvisioningState = "Succeeded" + ManagedClusterPodIdentityProvisioningStateUpdating ManagedClusterPodIdentityProvisioningState = "Updating" +) + +func PossibleValuesForManagedClusterPodIdentityProvisioningState() []string { + return []string{ + string(ManagedClusterPodIdentityProvisioningStateAssigned), + string(ManagedClusterPodIdentityProvisioningStateCanceled), + string(ManagedClusterPodIdentityProvisioningStateDeleting), + string(ManagedClusterPodIdentityProvisioningStateFailed), + string(ManagedClusterPodIdentityProvisioningStateSucceeded), + string(ManagedClusterPodIdentityProvisioningStateUpdating), + } +} + +func (s *ManagedClusterPodIdentityProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseManagedClusterPodIdentityProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseManagedClusterPodIdentityProvisioningState(input string) (*ManagedClusterPodIdentityProvisioningState, error) { + vals := map[string]ManagedClusterPodIdentityProvisioningState{ + "assigned": ManagedClusterPodIdentityProvisioningStateAssigned, + 
"canceled": ManagedClusterPodIdentityProvisioningStateCanceled, + "deleting": ManagedClusterPodIdentityProvisioningStateDeleting, + "failed": ManagedClusterPodIdentityProvisioningStateFailed, + "succeeded": ManagedClusterPodIdentityProvisioningStateSucceeded, + "updating": ManagedClusterPodIdentityProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ManagedClusterPodIdentityProvisioningState(input) + return &out, nil +} + +type ManagedClusterSKUName string + +const ( + ManagedClusterSKUNameBase ManagedClusterSKUName = "Base" +) + +func PossibleValuesForManagedClusterSKUName() []string { + return []string{ + string(ManagedClusterSKUNameBase), + } +} + +func (s *ManagedClusterSKUName) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseManagedClusterSKUName(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseManagedClusterSKUName(input string) (*ManagedClusterSKUName, error) { + vals := map[string]ManagedClusterSKUName{ + "base": ManagedClusterSKUNameBase, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ManagedClusterSKUName(input) + return &out, nil +} + +type ManagedClusterSKUTier string + +const ( + ManagedClusterSKUTierFree ManagedClusterSKUTier = "Free" + ManagedClusterSKUTierPremium ManagedClusterSKUTier = "Premium" + ManagedClusterSKUTierStandard ManagedClusterSKUTier = "Standard" +) + +func PossibleValuesForManagedClusterSKUTier() []string { + return []string{ + string(ManagedClusterSKUTierFree), + string(ManagedClusterSKUTierPremium), + string(ManagedClusterSKUTierStandard), + } +} + +func (s *ManagedClusterSKUTier) 
UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseManagedClusterSKUTier(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseManagedClusterSKUTier(input string) (*ManagedClusterSKUTier, error) { + vals := map[string]ManagedClusterSKUTier{ + "free": ManagedClusterSKUTierFree, + "premium": ManagedClusterSKUTierPremium, + "standard": ManagedClusterSKUTierStandard, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ManagedClusterSKUTier(input) + return &out, nil +} + +type NetworkDataplane string + +const ( + NetworkDataplaneAzure NetworkDataplane = "azure" + NetworkDataplaneCilium NetworkDataplane = "cilium" +) + +func PossibleValuesForNetworkDataplane() []string { + return []string{ + string(NetworkDataplaneAzure), + string(NetworkDataplaneCilium), + } +} + +func (s *NetworkDataplane) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNetworkDataplane(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseNetworkDataplane(input string) (*NetworkDataplane, error) { + vals := map[string]NetworkDataplane{ + "azure": NetworkDataplaneAzure, + "cilium": NetworkDataplaneCilium, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NetworkDataplane(input) + return &out, nil +} + +type NetworkMode string + +const ( + NetworkModeBridge NetworkMode = "bridge" + NetworkModeTransparent NetworkMode = "transparent" +) + +func PossibleValuesForNetworkMode() []string { + return []string{ + 
string(NetworkModeBridge), + string(NetworkModeTransparent), + } +} + +func (s *NetworkMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNetworkMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseNetworkMode(input string) (*NetworkMode, error) { + vals := map[string]NetworkMode{ + "bridge": NetworkModeBridge, + "transparent": NetworkModeTransparent, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NetworkMode(input) + return &out, nil +} + +type NetworkPlugin string + +const ( + NetworkPluginAzure NetworkPlugin = "azure" + NetworkPluginKubenet NetworkPlugin = "kubenet" + NetworkPluginNone NetworkPlugin = "none" +) + +func PossibleValuesForNetworkPlugin() []string { + return []string{ + string(NetworkPluginAzure), + string(NetworkPluginKubenet), + string(NetworkPluginNone), + } +} + +func (s *NetworkPlugin) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNetworkPlugin(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseNetworkPlugin(input string) (*NetworkPlugin, error) { + vals := map[string]NetworkPlugin{ + "azure": NetworkPluginAzure, + "kubenet": NetworkPluginKubenet, + "none": NetworkPluginNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NetworkPlugin(input) + return &out, nil +} + +type NetworkPluginMode string + +const ( + NetworkPluginModeOverlay NetworkPluginMode = "overlay" +) + +func PossibleValuesForNetworkPluginMode() []string { 
+ return []string{ + string(NetworkPluginModeOverlay), + } +} + +func (s *NetworkPluginMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNetworkPluginMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseNetworkPluginMode(input string) (*NetworkPluginMode, error) { + vals := map[string]NetworkPluginMode{ + "overlay": NetworkPluginModeOverlay, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NetworkPluginMode(input) + return &out, nil +} + +type NetworkPolicy string + +const ( + NetworkPolicyAzure NetworkPolicy = "azure" + NetworkPolicyCalico NetworkPolicy = "calico" + NetworkPolicyCilium NetworkPolicy = "cilium" + NetworkPolicyNone NetworkPolicy = "none" +) + +func PossibleValuesForNetworkPolicy() []string { + return []string{ + string(NetworkPolicyAzure), + string(NetworkPolicyCalico), + string(NetworkPolicyCilium), + string(NetworkPolicyNone), + } +} + +func (s *NetworkPolicy) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNetworkPolicy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseNetworkPolicy(input string) (*NetworkPolicy, error) { + vals := map[string]NetworkPolicy{ + "azure": NetworkPolicyAzure, + "calico": NetworkPolicyCalico, + "cilium": NetworkPolicyCilium, + "none": NetworkPolicyNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NetworkPolicy(input) + return &out, nil +} + +type NginxIngressControllerType string + +const ( + 
NginxIngressControllerTypeAnnotationControlled NginxIngressControllerType = "AnnotationControlled" + NginxIngressControllerTypeExternal NginxIngressControllerType = "External" + NginxIngressControllerTypeInternal NginxIngressControllerType = "Internal" + NginxIngressControllerTypeNone NginxIngressControllerType = "None" +) + +func PossibleValuesForNginxIngressControllerType() []string { + return []string{ + string(NginxIngressControllerTypeAnnotationControlled), + string(NginxIngressControllerTypeExternal), + string(NginxIngressControllerTypeInternal), + string(NginxIngressControllerTypeNone), + } +} + +func (s *NginxIngressControllerType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNginxIngressControllerType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseNginxIngressControllerType(input string) (*NginxIngressControllerType, error) { + vals := map[string]NginxIngressControllerType{ + "annotationcontrolled": NginxIngressControllerTypeAnnotationControlled, + "external": NginxIngressControllerTypeExternal, + "internal": NginxIngressControllerTypeInternal, + "none": NginxIngressControllerTypeNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NginxIngressControllerType(input) + return &out, nil +} + +type NodeOSUpgradeChannel string + +const ( + NodeOSUpgradeChannelNodeImage NodeOSUpgradeChannel = "NodeImage" + NodeOSUpgradeChannelNone NodeOSUpgradeChannel = "None" + NodeOSUpgradeChannelSecurityPatch NodeOSUpgradeChannel = "SecurityPatch" + NodeOSUpgradeChannelUnmanaged NodeOSUpgradeChannel = "Unmanaged" +) + +func PossibleValuesForNodeOSUpgradeChannel() []string { + return []string{ + string(NodeOSUpgradeChannelNodeImage), + 
string(NodeOSUpgradeChannelNone), + string(NodeOSUpgradeChannelSecurityPatch), + string(NodeOSUpgradeChannelUnmanaged), + } +} + +func (s *NodeOSUpgradeChannel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNodeOSUpgradeChannel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseNodeOSUpgradeChannel(input string) (*NodeOSUpgradeChannel, error) { + vals := map[string]NodeOSUpgradeChannel{ + "nodeimage": NodeOSUpgradeChannelNodeImage, + "none": NodeOSUpgradeChannelNone, + "securitypatch": NodeOSUpgradeChannelSecurityPatch, + "unmanaged": NodeOSUpgradeChannelUnmanaged, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NodeOSUpgradeChannel(input) + return &out, nil +} + +type NodeProvisioningDefaultNodePools string + +const ( + NodeProvisioningDefaultNodePoolsAuto NodeProvisioningDefaultNodePools = "Auto" + NodeProvisioningDefaultNodePoolsNone NodeProvisioningDefaultNodePools = "None" +) + +func PossibleValuesForNodeProvisioningDefaultNodePools() []string { + return []string{ + string(NodeProvisioningDefaultNodePoolsAuto), + string(NodeProvisioningDefaultNodePoolsNone), + } +} + +func (s *NodeProvisioningDefaultNodePools) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNodeProvisioningDefaultNodePools(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseNodeProvisioningDefaultNodePools(input string) (*NodeProvisioningDefaultNodePools, error) { + vals := map[string]NodeProvisioningDefaultNodePools{ + "auto": NodeProvisioningDefaultNodePoolsAuto, + "none": 
NodeProvisioningDefaultNodePoolsNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NodeProvisioningDefaultNodePools(input) + return &out, nil +} + +type NodeProvisioningMode string + +const ( + NodeProvisioningModeAuto NodeProvisioningMode = "Auto" + NodeProvisioningModeManual NodeProvisioningMode = "Manual" +) + +func PossibleValuesForNodeProvisioningMode() []string { + return []string{ + string(NodeProvisioningModeAuto), + string(NodeProvisioningModeManual), + } +} + +func (s *NodeProvisioningMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNodeProvisioningMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseNodeProvisioningMode(input string) (*NodeProvisioningMode, error) { + vals := map[string]NodeProvisioningMode{ + "auto": NodeProvisioningModeAuto, + "manual": NodeProvisioningModeManual, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NodeProvisioningMode(input) + return &out, nil +} + +type OSDiskType string + +const ( + OSDiskTypeEphemeral OSDiskType = "Ephemeral" + OSDiskTypeManaged OSDiskType = "Managed" +) + +func PossibleValuesForOSDiskType() []string { + return []string{ + string(OSDiskTypeEphemeral), + string(OSDiskTypeManaged), + } +} + +func (s *OSDiskType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseOSDiskType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOSDiskType(input string) (*OSDiskType, error) { + vals := 
map[string]OSDiskType{ + "ephemeral": OSDiskTypeEphemeral, + "managed": OSDiskTypeManaged, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSDiskType(input) + return &out, nil +} + +type OSSKU string + +const ( + OSSKUAzureLinux OSSKU = "AzureLinux" + OSSKUCBLMariner OSSKU = "CBLMariner" + OSSKUUbuntu OSSKU = "Ubuntu" + OSSKUUbuntuTwoTwoZeroFour OSSKU = "Ubuntu2204" + OSSKUWindowsTwoZeroOneNine OSSKU = "Windows2019" + OSSKUWindowsTwoZeroTwoTwo OSSKU = "Windows2022" +) + +func PossibleValuesForOSSKU() []string { + return []string{ + string(OSSKUAzureLinux), + string(OSSKUCBLMariner), + string(OSSKUUbuntu), + string(OSSKUUbuntuTwoTwoZeroFour), + string(OSSKUWindowsTwoZeroOneNine), + string(OSSKUWindowsTwoZeroTwoTwo), + } +} + +func (s *OSSKU) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseOSSKU(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOSSKU(input string) (*OSSKU, error) { + vals := map[string]OSSKU{ + "azurelinux": OSSKUAzureLinux, + "cblmariner": OSSKUCBLMariner, + "ubuntu": OSSKUUbuntu, + "ubuntu2204": OSSKUUbuntuTwoTwoZeroFour, + "windows2019": OSSKUWindowsTwoZeroOneNine, + "windows2022": OSSKUWindowsTwoZeroTwoTwo, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSSKU(input) + return &out, nil +} + +type OSType string + +const ( + OSTypeLinux OSType = "Linux" + OSTypeWindows OSType = "Windows" +) + +func PossibleValuesForOSType() []string { + return []string{ + string(OSTypeLinux), + string(OSTypeWindows), + } +} + +func (s *OSType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { 
+ return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseOSType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOSType(input string) (*OSType, error) { + vals := map[string]OSType{ + "linux": OSTypeLinux, + "windows": OSTypeWindows, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSType(input) + return &out, nil +} + +type OutboundType string + +const ( + OutboundTypeLoadBalancer OutboundType = "loadBalancer" + OutboundTypeManagedNATGateway OutboundType = "managedNATGateway" + OutboundTypeNone OutboundType = "none" + OutboundTypeUserAssignedNATGateway OutboundType = "userAssignedNATGateway" + OutboundTypeUserDefinedRouting OutboundType = "userDefinedRouting" +) + +func PossibleValuesForOutboundType() []string { + return []string{ + string(OutboundTypeLoadBalancer), + string(OutboundTypeManagedNATGateway), + string(OutboundTypeNone), + string(OutboundTypeUserAssignedNATGateway), + string(OutboundTypeUserDefinedRouting), + } +} + +func (s *OutboundType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseOutboundType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOutboundType(input string) (*OutboundType, error) { + vals := map[string]OutboundType{ + "loadbalancer": OutboundTypeLoadBalancer, + "managednatgateway": OutboundTypeManagedNATGateway, + "none": OutboundTypeNone, + "userassignednatgateway": OutboundTypeUserAssignedNATGateway, + "userdefinedrouting": OutboundTypeUserDefinedRouting, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OutboundType(input) + return &out, 
nil +} + +type PodIPAllocationMode string + +const ( + PodIPAllocationModeDynamicIndividual PodIPAllocationMode = "DynamicIndividual" + PodIPAllocationModeStaticBlock PodIPAllocationMode = "StaticBlock" +) + +func PossibleValuesForPodIPAllocationMode() []string { + return []string{ + string(PodIPAllocationModeDynamicIndividual), + string(PodIPAllocationModeStaticBlock), + } +} + +func (s *PodIPAllocationMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parsePodIPAllocationMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parsePodIPAllocationMode(input string) (*PodIPAllocationMode, error) { + vals := map[string]PodIPAllocationMode{ + "dynamicindividual": PodIPAllocationModeDynamicIndividual, + "staticblock": PodIPAllocationModeStaticBlock, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := PodIPAllocationMode(input) + return &out, nil +} + +type Protocol string + +const ( + ProtocolTCP Protocol = "TCP" + ProtocolUDP Protocol = "UDP" +) + +func PossibleValuesForProtocol() []string { + return []string{ + string(ProtocolTCP), + string(ProtocolUDP), + } +} + +func (s *Protocol) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProtocol(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProtocol(input string) (*Protocol, error) { + vals := map[string]Protocol{ + "tcp": ProtocolTCP, + "udp": ProtocolUDP, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := 
Protocol(input) + return &out, nil +} + +type PublicNetworkAccess string + +const ( + PublicNetworkAccessDisabled PublicNetworkAccess = "Disabled" + PublicNetworkAccessEnabled PublicNetworkAccess = "Enabled" +) + +func PossibleValuesForPublicNetworkAccess() []string { + return []string{ + string(PublicNetworkAccessDisabled), + string(PublicNetworkAccessEnabled), + } +} + +func (s *PublicNetworkAccess) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parsePublicNetworkAccess(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parsePublicNetworkAccess(input string) (*PublicNetworkAccess, error) { + vals := map[string]PublicNetworkAccess{ + "disabled": PublicNetworkAccessDisabled, + "enabled": PublicNetworkAccessEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := PublicNetworkAccess(input) + return &out, nil +} + +type RestrictionLevel string + +const ( + RestrictionLevelReadOnly RestrictionLevel = "ReadOnly" + RestrictionLevelUnrestricted RestrictionLevel = "Unrestricted" +) + +func PossibleValuesForRestrictionLevel() []string { + return []string{ + string(RestrictionLevelReadOnly), + string(RestrictionLevelUnrestricted), + } +} + +func (s *RestrictionLevel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseRestrictionLevel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseRestrictionLevel(input string) (*RestrictionLevel, error) { + vals := map[string]RestrictionLevel{ + "readonly": RestrictionLevelReadOnly, + "unrestricted": RestrictionLevelUnrestricted, + } + 
if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := RestrictionLevel(input) + return &out, nil +} + +type ScaleDownMode string + +const ( + ScaleDownModeDeallocate ScaleDownMode = "Deallocate" + ScaleDownModeDelete ScaleDownMode = "Delete" +) + +func PossibleValuesForScaleDownMode() []string { + return []string{ + string(ScaleDownModeDeallocate), + string(ScaleDownModeDelete), + } +} + +func (s *ScaleDownMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScaleDownMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScaleDownMode(input string) (*ScaleDownMode, error) { + vals := map[string]ScaleDownMode{ + "deallocate": ScaleDownModeDeallocate, + "delete": ScaleDownModeDelete, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScaleDownMode(input) + return &out, nil +} + +type ScaleSetEvictionPolicy string + +const ( + ScaleSetEvictionPolicyDeallocate ScaleSetEvictionPolicy = "Deallocate" + ScaleSetEvictionPolicyDelete ScaleSetEvictionPolicy = "Delete" +) + +func PossibleValuesForScaleSetEvictionPolicy() []string { + return []string{ + string(ScaleSetEvictionPolicyDeallocate), + string(ScaleSetEvictionPolicyDelete), + } +} + +func (s *ScaleSetEvictionPolicy) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScaleSetEvictionPolicy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScaleSetEvictionPolicy(input string) (*ScaleSetEvictionPolicy, error) { + vals := 
map[string]ScaleSetEvictionPolicy{ + "deallocate": ScaleSetEvictionPolicyDeallocate, + "delete": ScaleSetEvictionPolicyDelete, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScaleSetEvictionPolicy(input) + return &out, nil +} + +type ScaleSetPriority string + +const ( + ScaleSetPriorityRegular ScaleSetPriority = "Regular" + ScaleSetPrioritySpot ScaleSetPriority = "Spot" +) + +func PossibleValuesForScaleSetPriority() []string { + return []string{ + string(ScaleSetPriorityRegular), + string(ScaleSetPrioritySpot), + } +} + +func (s *ScaleSetPriority) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScaleSetPriority(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScaleSetPriority(input string) (*ScaleSetPriority, error) { + vals := map[string]ScaleSetPriority{ + "regular": ScaleSetPriorityRegular, + "spot": ScaleSetPrioritySpot, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScaleSetPriority(input) + return &out, nil +} + +type ServiceMeshMode string + +const ( + ServiceMeshModeDisabled ServiceMeshMode = "Disabled" + ServiceMeshModeIstio ServiceMeshMode = "Istio" +) + +func PossibleValuesForServiceMeshMode() []string { + return []string{ + string(ServiceMeshModeDisabled), + string(ServiceMeshModeIstio), + } +} + +func (s *ServiceMeshMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServiceMeshMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func 
parseServiceMeshMode(input string) (*ServiceMeshMode, error) { + vals := map[string]ServiceMeshMode{ + "disabled": ServiceMeshModeDisabled, + "istio": ServiceMeshModeIstio, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServiceMeshMode(input) + return &out, nil +} + +type UndrainableNodeBehavior string + +const ( + UndrainableNodeBehaviorCordon UndrainableNodeBehavior = "Cordon" + UndrainableNodeBehaviorSchedule UndrainableNodeBehavior = "Schedule" +) + +func PossibleValuesForUndrainableNodeBehavior() []string { + return []string{ + string(UndrainableNodeBehaviorCordon), + string(UndrainableNodeBehaviorSchedule), + } +} + +func (s *UndrainableNodeBehavior) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseUndrainableNodeBehavior(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseUndrainableNodeBehavior(input string) (*UndrainableNodeBehavior, error) { + vals := map[string]UndrainableNodeBehavior{ + "cordon": UndrainableNodeBehaviorCordon, + "schedule": UndrainableNodeBehaviorSchedule, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UndrainableNodeBehavior(input) + return &out, nil +} + +type UpgradeChannel string + +const ( + UpgradeChannelNodeNegativeimage UpgradeChannel = "node-image" + UpgradeChannelNone UpgradeChannel = "none" + UpgradeChannelPatch UpgradeChannel = "patch" + UpgradeChannelRapid UpgradeChannel = "rapid" + UpgradeChannelStable UpgradeChannel = "stable" +) + +func PossibleValuesForUpgradeChannel() []string { + return []string{ + string(UpgradeChannelNodeNegativeimage), + string(UpgradeChannelNone), + string(UpgradeChannelPatch), + 
string(UpgradeChannelRapid), + string(UpgradeChannelStable), + } +} + +func (s *UpgradeChannel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseUpgradeChannel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseUpgradeChannel(input string) (*UpgradeChannel, error) { + vals := map[string]UpgradeChannel{ + "node-image": UpgradeChannelNodeNegativeimage, + "none": UpgradeChannelNone, + "patch": UpgradeChannelPatch, + "rapid": UpgradeChannelRapid, + "stable": UpgradeChannelStable, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UpgradeChannel(input) + return &out, nil +} + +type WorkloadRuntime string + +const ( + WorkloadRuntimeOCIContainer WorkloadRuntime = "OCIContainer" + WorkloadRuntimeWasmWasi WorkloadRuntime = "WasmWasi" +) + +func PossibleValuesForWorkloadRuntime() []string { + return []string{ + string(WorkloadRuntimeOCIContainer), + string(WorkloadRuntimeWasmWasi), + } +} + +func (s *WorkloadRuntime) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseWorkloadRuntime(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseWorkloadRuntime(input string) (*WorkloadRuntime, error) { + vals := map[string]WorkloadRuntime{ + "ocicontainer": WorkloadRuntimeOCIContainer, + "wasmwasi": WorkloadRuntimeWasmWasi, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := WorkloadRuntime(input) + return &out, nil +} diff --git 
a/resource-manager/containerservice/2025-05-01/managedclusters/id_accessprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/id_accessprofile.go new file mode 100644 index 00000000000..0cc903a7fde --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/id_accessprofile.go @@ -0,0 +1,139 @@ +package managedclusters + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&AccessProfileId{}) +} + +var _ resourceids.ResourceId = &AccessProfileId{} + +// AccessProfileId is a struct representing the Resource ID for a Access Profile +type AccessProfileId struct { + SubscriptionId string + ResourceGroupName string + ManagedClusterName string + AccessProfileName string +} + +// NewAccessProfileID returns a new AccessProfileId struct +func NewAccessProfileID(subscriptionId string, resourceGroupName string, managedClusterName string, accessProfileName string) AccessProfileId { + return AccessProfileId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ManagedClusterName: managedClusterName, + AccessProfileName: accessProfileName, + } +} + +// ParseAccessProfileID parses 'input' into a AccessProfileId +func ParseAccessProfileID(input string) (*AccessProfileId, error) { + parser := resourceids.NewParserFromResourceIdType(&AccessProfileId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := AccessProfileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseAccessProfileIDInsensitively parses 'input' case-insensitively into a AccessProfileId +// note: 
this method should only be used for API response data and not user input +func ParseAccessProfileIDInsensitively(input string) (*AccessProfileId, error) { + parser := resourceids.NewParserFromResourceIdType(&AccessProfileId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := AccessProfileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *AccessProfileId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ManagedClusterName, ok = input.Parsed["managedClusterName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "managedClusterName", input) + } + + if id.AccessProfileName, ok = input.Parsed["accessProfileName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "accessProfileName", input) + } + + return nil +} + +// ValidateAccessProfileID checks that 'input' can be parsed as a Access Profile ID +func ValidateAccessProfileID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseAccessProfileID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Access Profile ID +func (id AccessProfileId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s/accessProfiles/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ManagedClusterName, id.AccessProfileName) +} + +// Segments returns 
a slice of Resource ID Segments which comprise this Access Profile ID +func (id AccessProfileId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("managedClusterName", "managedClusterName"), + resourceids.StaticSegment("staticAccessProfiles", "accessProfiles", "accessProfiles"), + resourceids.UserSpecifiedSegment("accessProfileName", "accessProfileName"), + } +} + +// String returns a human-readable description of this Access Profile ID +func (id AccessProfileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Managed Cluster Name: %q", id.ManagedClusterName), + fmt.Sprintf("Access Profile Name: %q", id.AccessProfileName), + } + return fmt.Sprintf("Access Profile (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/id_accessprofile_test.go b/resource-manager/containerservice/2025-05-01/managedclusters/id_accessprofile_test.go new file mode 100644 index 00000000000..9d2f96670dd --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/id_accessprofile_test.go @@ -0,0 +1,327 @@ +package managedclusters + +import ( + "testing" + + 
"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &AccessProfileId{} + +func TestNewAccessProfileID(t *testing.T) { + id := NewAccessProfileID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "accessProfileName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.ManagedClusterName != "managedClusterName" { + t.Fatalf("Expected %q but got %q for Segment 'ManagedClusterName'", id.ManagedClusterName, "managedClusterName") + } + + if id.AccessProfileName != "accessProfileName" { + t.Fatalf("Expected %q but got %q for Segment 'AccessProfileName'", id.AccessProfileName, "accessProfileName") + } +} + +func TestFormatAccessProfileID(t *testing.T) { + actual := NewAccessProfileID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "accessProfileName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/accessProfiles/accessProfileName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseAccessProfileID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AccessProfileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/accessProfiles", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/accessProfiles/accessProfileName", + Expected: &AccessProfileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + AccessProfileName: "accessProfileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/accessProfiles/accessProfileName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseAccessProfileID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.AccessProfileName != v.Expected.AccessProfileName { + t.Fatalf("Expected %q but got %q for AccessProfileName", v.Expected.AccessProfileName, actual.AccessProfileName) + } + + } +} + +func TestParseAccessProfileIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *AccessProfileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + 
// Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/accessProfiles", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aCcEsSpRoFiLeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/accessProfiles/accessProfileName", + Expected: &AccessProfileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + AccessProfileName: "accessProfileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/accessProfiles/accessProfileName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aCcEsSpRoFiLeS/aCcEsSpRoFiLeNaMe", + Expected: &AccessProfileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + ManagedClusterName: "mAnAgEdClUsTeRnAmE", + AccessProfileName: "aCcEsSpRoFiLeNaMe", + }, + }, + { + // 
Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/aCcEsSpRoFiLeS/aCcEsSpRoFiLeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseAccessProfileIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.AccessProfileName != v.Expected.AccessProfileName { + t.Fatalf("Expected %q but got %q for AccessProfileName", v.Expected.AccessProfileName, actual.AccessProfileName) + } + + } +} + +func TestSegmentsForAccessProfileId(t *testing.T) { + segments := AccessProfileId{}.Segments() + if len(segments) == 0 { + t.Fatalf("AccessProfileId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/id_commandresult.go 
b/resource-manager/containerservice/2025-05-01/managedclusters/id_commandresult.go new file mode 100644 index 00000000000..737a71e0621 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/id_commandresult.go @@ -0,0 +1,139 @@ +package managedclusters + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&CommandResultId{}) +} + +var _ resourceids.ResourceId = &CommandResultId{} + +// CommandResultId is a struct representing the Resource ID for a Command Result +type CommandResultId struct { + SubscriptionId string + ResourceGroupName string + ManagedClusterName string + CommandId string +} + +// NewCommandResultID returns a new CommandResultId struct +func NewCommandResultID(subscriptionId string, resourceGroupName string, managedClusterName string, commandId string) CommandResultId { + return CommandResultId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ManagedClusterName: managedClusterName, + CommandId: commandId, + } +} + +// ParseCommandResultID parses 'input' into a CommandResultId +func ParseCommandResultID(input string) (*CommandResultId, error) { + parser := resourceids.NewParserFromResourceIdType(&CommandResultId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := CommandResultId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseCommandResultIDInsensitively parses 'input' case-insensitively into a CommandResultId +// note: this method should only be used for API response data and not user input +func 
ParseCommandResultIDInsensitively(input string) (*CommandResultId, error) { + parser := resourceids.NewParserFromResourceIdType(&CommandResultId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := CommandResultId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *CommandResultId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ManagedClusterName, ok = input.Parsed["managedClusterName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "managedClusterName", input) + } + + if id.CommandId, ok = input.Parsed["commandId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "commandId", input) + } + + return nil +} + +// ValidateCommandResultID checks that 'input' can be parsed as a Command Result ID +func ValidateCommandResultID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseCommandResultID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Command Result ID +func (id CommandResultId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s/commandResults/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ManagedClusterName, id.CommandId) +} + +// Segments returns a slice of Resource ID Segments which comprise this Command Result ID +func (id CommandResultId) Segments() 
[]resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("managedClusterName", "managedClusterName"), + resourceids.StaticSegment("staticCommandResults", "commandResults", "commandResults"), + resourceids.UserSpecifiedSegment("commandId", "commandId"), + } +} + +// String returns a human-readable description of this Command Result ID +func (id CommandResultId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Managed Cluster Name: %q", id.ManagedClusterName), + fmt.Sprintf("Command: %q", id.CommandId), + } + return fmt.Sprintf("Command Result (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/id_commandresult_test.go b/resource-manager/containerservice/2025-05-01/managedclusters/id_commandresult_test.go new file mode 100644 index 00000000000..0c55ce098fa --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/id_commandresult_test.go @@ -0,0 +1,327 @@ +package managedclusters + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &CommandResultId{} + +func TestNewCommandResultID(t *testing.T) { + id := NewCommandResultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "commandId") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.ManagedClusterName != "managedClusterName" { + t.Fatalf("Expected %q but got %q for Segment 'ManagedClusterName'", id.ManagedClusterName, "managedClusterName") + } + + if id.CommandId != "commandId" { + t.Fatalf("Expected %q but got %q for Segment 'CommandId'", id.CommandId, "commandId") + } +} + +func TestFormatCommandResultID(t *testing.T) { + actual := NewCommandResultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "commandId").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/commandResults/commandId" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseCommandResultID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *CommandResultId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/commandResults", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/commandResults/commandId", + Expected: &CommandResultId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + CommandId: "commandId", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/commandResults/commandId/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseCommandResultID(v.Input) + if err != nil { + if v.Error { + continue + } + + 
t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.CommandId != v.Expected.CommandId { + t.Fatalf("Expected %q but got %q for CommandId", v.Expected.CommandId, actual.CommandId) + } + + } +} + +func TestParseCommandResultIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *CommandResultId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/commandResults", + 
Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/cOmMaNdReSuLtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/commandResults/commandId", + Expected: &CommandResultId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + CommandId: "commandId", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/commandResults/commandId/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/cOmMaNdReSuLtS/cOmMaNdId", + Expected: &CommandResultId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + ManagedClusterName: "mAnAgEdClUsTeRnAmE", + CommandId: "cOmMaNdId", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/cOmMaNdReSuLtS/cOmMaNdId/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseCommandResultIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but 
got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.CommandId != v.Expected.CommandId { + t.Fatalf("Expected %q but got %q for CommandId", v.Expected.CommandId, actual.CommandId) + } + + } +} + +func TestSegmentsForCommandResultId(t *testing.T) { + segments := CommandResultId{}.Segments() + if len(segments) == 0 { + t.Fatalf("CommandResultId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/id_location.go b/resource-manager/containerservice/2025-05-01/managedclusters/id_location.go new file mode 100644 index 00000000000..79aa9b51c07 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/id_location.go @@ -0,0 +1,121 @@ +package managedclusters + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&LocationId{}) +} + +var _ resourceids.ResourceId = &LocationId{} + +// LocationId is a struct representing the Resource ID for a Location +type LocationId struct { + SubscriptionId string + LocationName string +} + +// NewLocationID returns a new LocationId struct +func NewLocationID(subscriptionId string, locationName string) LocationId { + return LocationId{ + SubscriptionId: subscriptionId, + LocationName: locationName, + } +} + +// ParseLocationID parses 'input' into a LocationId +func ParseLocationID(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseLocationIDInsensitively parses 'input' case-insensitively into a LocationId +// note: this method should only be used for API response data and not user input +func ParseLocationIDInsensitively(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *LocationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.LocationName, ok = input.Parsed["locationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "locationName", input) + } + + return nil +} + +// ValidateLocationID checks that 'input' can be parsed as a Location ID +func ValidateLocationID(input interface{}, key string) 
(warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseLocationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Location ID +func (id LocationId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.ContainerService/locations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.LocationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Location ID +func (id LocationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticLocations", "locations", "locations"), + resourceids.UserSpecifiedSegment("locationName", "locationName"), + } +} + +// String returns a human-readable description of this Location ID +func (id LocationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location Name: %q", id.LocationName), + } + return fmt.Sprintf("Location (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/id_location_test.go b/resource-manager/containerservice/2025-05-01/managedclusters/id_location_test.go new file mode 100644 index 00000000000..db0f4f67412 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/id_location_test.go @@ -0,0 +1,237 @@ +package managedclusters + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" 
+) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &LocationId{} + +func TestNewLocationID(t *testing.T) { + id := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.LocationName != "locationName" { + t.Fatalf("Expected %q but got %q for Segment 'LocationName'", id.LocationName, "locationName") + } +} + +func TestFormatLocationID(t *testing.T) { + actual := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseLocationID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName", + Expected: &LocationId{ + 
SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestParseLocationIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService", + Error: true, + }, + { + // 
Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/lOcAtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/lOcAtIoNs/lOcAtIoNnAmE", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "lOcAtIoNnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/lOcAtIoNs/lOcAtIoNnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, 
actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestSegmentsForLocationId(t *testing.T) { + segments := LocationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("LocationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/id_meshrevisionprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/id_meshrevisionprofile.go new file mode 100644 index 00000000000..009771d0a59 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/id_meshrevisionprofile.go @@ -0,0 +1,130 @@ +package managedclusters + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&MeshRevisionProfileId{}) +} + +var _ resourceids.ResourceId = &MeshRevisionProfileId{} + +// MeshRevisionProfileId is a struct representing the Resource ID for a Mesh Revision Profile +type MeshRevisionProfileId struct { + SubscriptionId string + LocationName string + MeshRevisionProfileName string +} + +// NewMeshRevisionProfileID returns a new MeshRevisionProfileId struct +func NewMeshRevisionProfileID(subscriptionId string, locationName string, meshRevisionProfileName string) MeshRevisionProfileId { + return MeshRevisionProfileId{ + SubscriptionId: subscriptionId, + LocationName: locationName, + MeshRevisionProfileName: meshRevisionProfileName, + } +} + +// ParseMeshRevisionProfileID parses 'input' into a MeshRevisionProfileId +func ParseMeshRevisionProfileID(input string) (*MeshRevisionProfileId, error) { + parser := resourceids.NewParserFromResourceIdType(&MeshRevisionProfileId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := MeshRevisionProfileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseMeshRevisionProfileIDInsensitively parses 'input' case-insensitively into a MeshRevisionProfileId +// note: this method should only be used for API response data and not user input +func ParseMeshRevisionProfileIDInsensitively(input string) (*MeshRevisionProfileId, error) { + parser := resourceids.NewParserFromResourceIdType(&MeshRevisionProfileId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := MeshRevisionProfileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *MeshRevisionProfileId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.LocationName, ok = input.Parsed["locationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "locationName", input) + } + + if id.MeshRevisionProfileName, ok = input.Parsed["meshRevisionProfileName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "meshRevisionProfileName", input) + } + + return nil +} + +// ValidateMeshRevisionProfileID checks that 'input' can be parsed as a Mesh Revision Profile ID +func ValidateMeshRevisionProfileID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseMeshRevisionProfileID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Mesh Revision Profile ID +func (id MeshRevisionProfileId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.ContainerService/locations/%s/meshRevisionProfiles/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.LocationName, id.MeshRevisionProfileName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Mesh Revision Profile ID +func (id MeshRevisionProfileId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticLocations", "locations", "locations"), + resourceids.UserSpecifiedSegment("locationName", "locationName"), + resourceids.StaticSegment("staticMeshRevisionProfiles", "meshRevisionProfiles", 
"meshRevisionProfiles"), + resourceids.UserSpecifiedSegment("meshRevisionProfileName", "meshRevisionProfileName"), + } +} + +// String returns a human-readable description of this Mesh Revision Profile ID +func (id MeshRevisionProfileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location Name: %q", id.LocationName), + fmt.Sprintf("Mesh Revision Profile Name: %q", id.MeshRevisionProfileName), + } + return fmt.Sprintf("Mesh Revision Profile (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/id_meshrevisionprofile_test.go b/resource-manager/containerservice/2025-05-01/managedclusters/id_meshrevisionprofile_test.go new file mode 100644 index 00000000000..a4e7a52ef61 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/id_meshrevisionprofile_test.go @@ -0,0 +1,282 @@ +package managedclusters + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &MeshRevisionProfileId{} + +func TestNewMeshRevisionProfileID(t *testing.T) { + id := NewMeshRevisionProfileID("12345678-1234-9876-4563-123456789012", "locationName", "meshRevisionProfileName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.LocationName != "locationName" { + t.Fatalf("Expected %q but got %q for Segment 'LocationName'", id.LocationName, "locationName") + } + + if id.MeshRevisionProfileName != "meshRevisionProfileName" { + t.Fatalf("Expected %q but got %q for Segment 'MeshRevisionProfileName'", id.MeshRevisionProfileName, "meshRevisionProfileName") + } +} + +func TestFormatMeshRevisionProfileID(t *testing.T) { + actual := NewMeshRevisionProfileID("12345678-1234-9876-4563-123456789012", "locationName", "meshRevisionProfileName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName/meshRevisionProfiles/meshRevisionProfileName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseMeshRevisionProfileID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *MeshRevisionProfileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName/meshRevisionProfiles", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName/meshRevisionProfiles/meshRevisionProfileName", + Expected: &MeshRevisionProfileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + MeshRevisionProfileName: "meshRevisionProfileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName/meshRevisionProfiles/meshRevisionProfileName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseMeshRevisionProfileID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + if actual.MeshRevisionProfileName != v.Expected.MeshRevisionProfileName { + t.Fatalf("Expected %q but got %q for MeshRevisionProfileName", v.Expected.MeshRevisionProfileName, actual.MeshRevisionProfileName) + } + + } +} + +func 
TestParseMeshRevisionProfileIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *MeshRevisionProfileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/lOcAtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/lOcAtIoNs/lOcAtIoNnAmE", + Error: true, + }, + { + // Incomplete URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName/meshRevisionProfiles", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/lOcAtIoNs/lOcAtIoNnAmE/mEsHrEvIsIoNpRoFiLeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName/meshRevisionProfiles/meshRevisionProfileName", + Expected: &MeshRevisionProfileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + MeshRevisionProfileName: "meshRevisionProfileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName/meshRevisionProfiles/meshRevisionProfileName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/lOcAtIoNs/lOcAtIoNnAmE/mEsHrEvIsIoNpRoFiLeS/mEsHrEvIsIoNpRoFiLeNaMe", + Expected: &MeshRevisionProfileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "lOcAtIoNnAmE", + MeshRevisionProfileName: "mEsHrEvIsIoNpRoFiLeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/lOcAtIoNs/lOcAtIoNnAmE/mEsHrEvIsIoNpRoFiLeS/mEsHrEvIsIoNpRoFiLeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseMeshRevisionProfileIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an 
error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + if actual.MeshRevisionProfileName != v.Expected.MeshRevisionProfileName { + t.Fatalf("Expected %q but got %q for MeshRevisionProfileName", v.Expected.MeshRevisionProfileName, actual.MeshRevisionProfileName) + } + + } +} + +func TestSegmentsForMeshRevisionProfileId(t *testing.T) { + segments := MeshRevisionProfileId{}.Segments() + if len(segments) == 0 { + t.Fatalf("MeshRevisionProfileId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/id_meshupgradeprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/id_meshupgradeprofile.go new file mode 100644 index 00000000000..c33efc10c79 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/id_meshupgradeprofile.go @@ -0,0 +1,139 @@ +package managedclusters + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&MeshUpgradeProfileId{}) +} + +var _ resourceids.ResourceId = &MeshUpgradeProfileId{} + +// MeshUpgradeProfileId is a struct representing the Resource ID for a Mesh Upgrade Profile +type MeshUpgradeProfileId struct { + SubscriptionId string + ResourceGroupName string + ManagedClusterName string + MeshUpgradeProfileName string +} + +// NewMeshUpgradeProfileID returns a new MeshUpgradeProfileId struct +func NewMeshUpgradeProfileID(subscriptionId string, resourceGroupName string, managedClusterName string, meshUpgradeProfileName string) MeshUpgradeProfileId { + return MeshUpgradeProfileId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ManagedClusterName: managedClusterName, + MeshUpgradeProfileName: meshUpgradeProfileName, + } +} + +// ParseMeshUpgradeProfileID parses 'input' into a MeshUpgradeProfileId +func ParseMeshUpgradeProfileID(input string) (*MeshUpgradeProfileId, error) { + parser := resourceids.NewParserFromResourceIdType(&MeshUpgradeProfileId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := MeshUpgradeProfileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseMeshUpgradeProfileIDInsensitively parses 'input' case-insensitively into a MeshUpgradeProfileId +// note: this method should only be used for API response data and not user input +func ParseMeshUpgradeProfileIDInsensitively(input string) (*MeshUpgradeProfileId, error) { + parser := resourceids.NewParserFromResourceIdType(&MeshUpgradeProfileId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := MeshUpgradeProfileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *MeshUpgradeProfileId) FromParseResult(input resourceids.ParseResult) error { + 
var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ManagedClusterName, ok = input.Parsed["managedClusterName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "managedClusterName", input) + } + + if id.MeshUpgradeProfileName, ok = input.Parsed["meshUpgradeProfileName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "meshUpgradeProfileName", input) + } + + return nil +} + +// ValidateMeshUpgradeProfileID checks that 'input' can be parsed as a Mesh Upgrade Profile ID +func ValidateMeshUpgradeProfileID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseMeshUpgradeProfileID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Mesh Upgrade Profile ID +func (id MeshUpgradeProfileId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s/meshUpgradeProfiles/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ManagedClusterName, id.MeshUpgradeProfileName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Mesh Upgrade Profile ID +func (id MeshUpgradeProfileId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", 
"example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("managedClusterName", "managedClusterName"), + resourceids.StaticSegment("staticMeshUpgradeProfiles", "meshUpgradeProfiles", "meshUpgradeProfiles"), + resourceids.UserSpecifiedSegment("meshUpgradeProfileName", "meshUpgradeProfileName"), + } +} + +// String returns a human-readable description of this Mesh Upgrade Profile ID +func (id MeshUpgradeProfileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Managed Cluster Name: %q", id.ManagedClusterName), + fmt.Sprintf("Mesh Upgrade Profile Name: %q", id.MeshUpgradeProfileName), + } + return fmt.Sprintf("Mesh Upgrade Profile (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/id_meshupgradeprofile_test.go b/resource-manager/containerservice/2025-05-01/managedclusters/id_meshupgradeprofile_test.go new file mode 100644 index 00000000000..eac2f86407e --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/id_meshupgradeprofile_test.go @@ -0,0 +1,327 @@ +package managedclusters + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &MeshUpgradeProfileId{} + +func TestNewMeshUpgradeProfileID(t *testing.T) { + id := NewMeshUpgradeProfileID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "meshUpgradeProfileName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.ManagedClusterName != "managedClusterName" { + t.Fatalf("Expected %q but got %q for Segment 'ManagedClusterName'", id.ManagedClusterName, "managedClusterName") + } + + if id.MeshUpgradeProfileName != "meshUpgradeProfileName" { + t.Fatalf("Expected %q but got %q for Segment 'MeshUpgradeProfileName'", id.MeshUpgradeProfileName, "meshUpgradeProfileName") + } +} + +func TestFormatMeshUpgradeProfileID(t *testing.T) { + actual := NewMeshUpgradeProfileID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "meshUpgradeProfileName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/meshUpgradeProfiles/meshUpgradeProfileName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseMeshUpgradeProfileID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *MeshUpgradeProfileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/meshUpgradeProfiles", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/meshUpgradeProfiles/meshUpgradeProfileName", + Expected: &MeshUpgradeProfileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + MeshUpgradeProfileName: "meshUpgradeProfileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/meshUpgradeProfiles/meshUpgradeProfileName/extra", + Error: true, + 
}, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseMeshUpgradeProfileID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.MeshUpgradeProfileName != v.Expected.MeshUpgradeProfileName { + t.Fatalf("Expected %q but got %q for MeshUpgradeProfileName", v.Expected.MeshUpgradeProfileName, actual.MeshUpgradeProfileName) + } + + } +} + +func TestParseMeshUpgradeProfileIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *MeshUpgradeProfileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: 
true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE", + Error: 
true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/meshUpgradeProfiles", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/mEsHuPgRaDePrOfIlEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/meshUpgradeProfiles/meshUpgradeProfileName", + Expected: &MeshUpgradeProfileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + MeshUpgradeProfileName: "meshUpgradeProfileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/meshUpgradeProfiles/meshUpgradeProfileName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/mEsHuPgRaDePrOfIlEs/mEsHuPgRaDePrOfIlEnAmE", + Expected: &MeshUpgradeProfileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + ManagedClusterName: "mAnAgEdClUsTeRnAmE", + MeshUpgradeProfileName: "mEsHuPgRaDePrOfIlEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/mEsHuPgRaDePrOfIlEs/mEsHuPgRaDePrOfIlEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseMeshUpgradeProfileIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.MeshUpgradeProfileName != v.Expected.MeshUpgradeProfileName { + t.Fatalf("Expected %q but got %q for MeshUpgradeProfileName", v.Expected.MeshUpgradeProfileName, actual.MeshUpgradeProfileName) + } + + } +} + +func TestSegmentsForMeshUpgradeProfileId(t *testing.T) { + segments := MeshUpgradeProfileId{}.Segments() + if len(segments) == 0 { + t.Fatalf("MeshUpgradeProfileId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_abortlatestoperation.go 
b/resource-manager/containerservice/2025-05-01/managedclusters/method_abortlatestoperation.go new file mode 100644 index 00000000000..a37480f49c9 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_abortlatestoperation.go @@ -0,0 +1,71 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AbortLatestOperationOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// AbortLatestOperation ... +func (c ManagedClustersClient) AbortLatestOperation(ctx context.Context, id commonids.KubernetesClusterId) (result AbortLatestOperationOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/abort", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// AbortLatestOperationThenPoll performs AbortLatestOperation then polls until it's completed +func (c ManagedClustersClient) AbortLatestOperationThenPoll(ctx context.Context, id commonids.KubernetesClusterId) error { + result, err := 
c.AbortLatestOperation(ctx, id) + if err != nil { + return fmt.Errorf("performing AbortLatestOperation: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after AbortLatestOperation: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_createorupdate.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_createorupdate.go new file mode 100644 index 00000000000..ee535475169 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_createorupdate.go @@ -0,0 +1,109 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *ManagedCluster +} + +type CreateOrUpdateOperationOptions struct { + IfMatch *string + IfNoneMatch *string +} + +func DefaultCreateOrUpdateOperationOptions() CreateOrUpdateOperationOptions { + return CreateOrUpdateOperationOptions{} +} + +func (o CreateOrUpdateOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + if o.IfMatch != nil { + out.Append("If-Match", fmt.Sprintf("%v", *o.IfMatch)) + } + if o.IfNoneMatch != nil { + out.Append("If-None-Match", fmt.Sprintf("%v", *o.IfNoneMatch)) + } + return &out +} + +func (o CreateOrUpdateOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o CreateOrUpdateOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + + return &out +} + +// CreateOrUpdate ... +func (c ManagedClustersClient) CreateOrUpdate(ctx context.Context, id commonids.KubernetesClusterId, input ManagedCluster, options CreateOrUpdateOperationOptions) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c ManagedClustersClient) CreateOrUpdateThenPoll(ctx context.Context, id 
commonids.KubernetesClusterId, input ManagedCluster, options CreateOrUpdateOperationOptions) error { + result, err := c.CreateOrUpdate(ctx, id, input, options) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_delete.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_delete.go new file mode 100644 index 00000000000..e73ea43ac3f --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_delete.go @@ -0,0 +1,100 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +type DeleteOperationOptions struct { + IfMatch *string +} + +func DefaultDeleteOperationOptions() DeleteOperationOptions { + return DeleteOperationOptions{} +} + +func (o DeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + if o.IfMatch != nil { + out.Append("If-Match", fmt.Sprintf("%v", *o.IfMatch)) + } + return &out +} + +func (o DeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o DeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + + return &out +} + +// Delete ... 
+func (c ManagedClustersClient) Delete(ctx context.Context, id commonids.KubernetesClusterId, options DeleteOperationOptions) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c ManagedClustersClient) DeleteThenPoll(ctx context.Context, id commonids.KubernetesClusterId, options DeleteOperationOptions) error { + result, err := c.Delete(ctx, id, options) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_get.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_get.go new file mode 100644 index 00000000000..5c5b9cd89b8 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_get.go @@ -0,0 +1,54 @@ +package managedclusters + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ManagedCluster +} + +// Get ... +func (c ManagedClustersClient) Get(ctx context.Context, id commonids.KubernetesClusterId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ManagedCluster + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_getaccessprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_getaccessprofile.go new file mode 100644 index 00000000000..99bbda7cca5 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_getaccessprofile.go @@ -0,0 +1,54 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetAccessProfileOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ManagedClusterAccessProfile +} + +// GetAccessProfile ... 
+func (c ManagedClustersClient) GetAccessProfile(ctx context.Context, id AccessProfileId) (result GetAccessProfileOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/listCredential", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ManagedClusterAccessProfile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_getcommandresult.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_getcommandresult.go new file mode 100644 index 00000000000..9cd6f99a5f9 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_getcommandresult.go @@ -0,0 +1,54 @@ +package managedclusters + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetCommandResultOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *RunCommandResult +} + +// GetCommandResult ... 
+func (c ManagedClustersClient) GetCommandResult(ctx context.Context, id CommandResultId) (result GetCommandResultOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model RunCommandResult + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_getmeshrevisionprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_getmeshrevisionprofile.go new file mode 100644 index 00000000000..aba6637b834 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_getmeshrevisionprofile.go @@ -0,0 +1,53 @@ +package managedclusters + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetMeshRevisionProfileOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *MeshRevisionProfile +} + +// GetMeshRevisionProfile ... 
+func (c ManagedClustersClient) GetMeshRevisionProfile(ctx context.Context, id MeshRevisionProfileId) (result GetMeshRevisionProfileOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model MeshRevisionProfile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_getmeshupgradeprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_getmeshupgradeprofile.go new file mode 100644 index 00000000000..536044f5875 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_getmeshupgradeprofile.go @@ -0,0 +1,53 @@ +package managedclusters + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetMeshUpgradeProfileOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *MeshUpgradeProfile +} + +// GetMeshUpgradeProfile ... 
+func (c ManagedClustersClient) GetMeshUpgradeProfile(ctx context.Context, id MeshUpgradeProfileId) (result GetMeshUpgradeProfileOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model MeshUpgradeProfile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_getupgradeprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_getupgradeprofile.go new file mode 100644 index 00000000000..b5a5533d9e3 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_getupgradeprofile.go @@ -0,0 +1,55 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUpgradeProfileOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ManagedClusterUpgradeProfile +} + +// GetUpgradeProfile ... 
+func (c ManagedClustersClient) GetUpgradeProfile(ctx context.Context, id commonids.KubernetesClusterId) (result GetUpgradeProfileOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/upgradeProfiles/default", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ManagedClusterUpgradeProfile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_list.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_list.go new file mode 100644 index 00000000000..026d6c9e193 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_list.go @@ -0,0 +1,106 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]ManagedCluster +} + +type ListCompleteResult struct { + LatestHttpResponse *http.Response + Items []ManagedCluster +} + +type ListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// List ... 
+func (c ManagedClustersClient) List(ctx context.Context, id commonids.SubscriptionId) (result ListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.ContainerService/managedClusters", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]ManagedCluster `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListComplete retrieves all the results into a single object +func (c ManagedClustersClient) ListComplete(ctx context.Context, id commonids.SubscriptionId) (ListCompleteResult, error) { + return c.ListCompleteMatchingPredicate(ctx, id, ManagedClusterOperationPredicate{}) +} + +// ListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c ManagedClustersClient) ListCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate ManagedClusterOperationPredicate) (result ListCompleteResult, err error) { + items := make([]ManagedCluster, 0) + + resp, err := c.List(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_listbyresourcegroup.go 
b/resource-manager/containerservice/2025-05-01/managedclusters/method_listbyresourcegroup.go new file mode 100644 index 00000000000..3243b394520 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_listbyresourcegroup.go @@ -0,0 +1,106 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]ManagedCluster +} + +type ListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []ManagedCluster +} + +type ListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByResourceGroup ... 
+func (c ManagedClustersClient) ListByResourceGroup(ctx context.Context, id commonids.ResourceGroupId) (result ListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.ContainerService/managedClusters", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]ManagedCluster `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByResourceGroupComplete retrieves all the results into a single object +func (c ManagedClustersClient) ListByResourceGroupComplete(ctx context.Context, id commonids.ResourceGroupId) (ListByResourceGroupCompleteResult, error) { + return c.ListByResourceGroupCompleteMatchingPredicate(ctx, id, ManagedClusterOperationPredicate{}) +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c ManagedClustersClient) ListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id commonids.ResourceGroupId, predicate ManagedClusterOperationPredicate) (result ListByResourceGroupCompleteResult, err error) { + items := make([]ManagedCluster, 0) + + resp, err := c.ListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByResourceGroupCompleteResult{ + LatestHttpResponse: 
resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_listclusteradmincredentials.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_listclusteradmincredentials.go new file mode 100644 index 00000000000..7ccb6f0d6eb --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_listclusteradmincredentials.go @@ -0,0 +1,84 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListClusterAdminCredentialsOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *CredentialResults +} + +type ListClusterAdminCredentialsOperationOptions struct { + ServerFqdn *string +} + +func DefaultListClusterAdminCredentialsOperationOptions() ListClusterAdminCredentialsOperationOptions { + return ListClusterAdminCredentialsOperationOptions{} +} + +func (o ListClusterAdminCredentialsOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ListClusterAdminCredentialsOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ListClusterAdminCredentialsOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.ServerFqdn != nil { + out.Append("server-fqdn", fmt.Sprintf("%v", *o.ServerFqdn)) + } + return &out +} + +// ListClusterAdminCredentials ... 
+func (c ManagedClustersClient) ListClusterAdminCredentials(ctx context.Context, id commonids.KubernetesClusterId, options ListClusterAdminCredentialsOperationOptions) (result ListClusterAdminCredentialsOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + OptionsObject: options, + Path: fmt.Sprintf("%s/listClusterAdminCredential", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model CredentialResults + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_listclustermonitoringusercredentials.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_listclustermonitoringusercredentials.go new file mode 100644 index 00000000000..cdf8383752e --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_listclustermonitoringusercredentials.go @@ -0,0 +1,84 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListClusterMonitoringUserCredentialsOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *CredentialResults +} + +type ListClusterMonitoringUserCredentialsOperationOptions struct { + ServerFqdn *string +} + +func DefaultListClusterMonitoringUserCredentialsOperationOptions() ListClusterMonitoringUserCredentialsOperationOptions { + return ListClusterMonitoringUserCredentialsOperationOptions{} +} + +func (o ListClusterMonitoringUserCredentialsOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ListClusterMonitoringUserCredentialsOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ListClusterMonitoringUserCredentialsOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.ServerFqdn != nil { + out.Append("server-fqdn", fmt.Sprintf("%v", *o.ServerFqdn)) + } + return &out +} + +// ListClusterMonitoringUserCredentials ... +func (c ManagedClustersClient) ListClusterMonitoringUserCredentials(ctx context.Context, id commonids.KubernetesClusterId, options ListClusterMonitoringUserCredentialsOperationOptions) (result ListClusterMonitoringUserCredentialsOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + OptionsObject: options, + Path: fmt.Sprintf("%s/listClusterMonitoringUserCredential", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model CredentialResults + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git 
a/resource-manager/containerservice/2025-05-01/managedclusters/method_listclusterusercredentials.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_listclusterusercredentials.go new file mode 100644 index 00000000000..ce168c171aa --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_listclusterusercredentials.go @@ -0,0 +1,88 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListClusterUserCredentialsOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *CredentialResults +} + +type ListClusterUserCredentialsOperationOptions struct { + Format *Format + ServerFqdn *string +} + +func DefaultListClusterUserCredentialsOperationOptions() ListClusterUserCredentialsOperationOptions { + return ListClusterUserCredentialsOperationOptions{} +} + +func (o ListClusterUserCredentialsOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ListClusterUserCredentialsOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ListClusterUserCredentialsOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Format != nil { + out.Append("format", fmt.Sprintf("%v", *o.Format)) + } + if o.ServerFqdn != nil { + out.Append("server-fqdn", fmt.Sprintf("%v", *o.ServerFqdn)) + } + return &out +} + +// ListClusterUserCredentials ... 
+func (c ManagedClustersClient) ListClusterUserCredentials(ctx context.Context, id commonids.KubernetesClusterId, options ListClusterUserCredentialsOperationOptions) (result ListClusterUserCredentialsOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + OptionsObject: options, + Path: fmt.Sprintf("%s/listClusterUserCredential", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model CredentialResults + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_listkubernetesversions.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_listkubernetesversions.go new file mode 100644 index 00000000000..286125762cd --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_listkubernetesversions.go @@ -0,0 +1,54 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListKubernetesVersionsOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *KubernetesVersionListResult +} + +// ListKubernetesVersions ... 
+func (c ManagedClustersClient) ListKubernetesVersions(ctx context.Context, id LocationId) (result ListKubernetesVersionsOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/kubernetesVersions", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model KubernetesVersionListResult + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_listmeshrevisionprofiles.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_listmeshrevisionprofiles.go new file mode 100644 index 00000000000..0172addb23c --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_listmeshrevisionprofiles.go @@ -0,0 +1,105 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListMeshRevisionProfilesOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]MeshRevisionProfile +} + +type ListMeshRevisionProfilesCompleteResult struct { + LatestHttpResponse *http.Response + Items []MeshRevisionProfile +} + +type ListMeshRevisionProfilesCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListMeshRevisionProfilesCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListMeshRevisionProfiles ... +func (c ManagedClustersClient) ListMeshRevisionProfiles(ctx context.Context, id LocationId) (result ListMeshRevisionProfilesOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListMeshRevisionProfilesCustomPager{}, + Path: fmt.Sprintf("%s/meshRevisionProfiles", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]MeshRevisionProfile `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListMeshRevisionProfilesComplete retrieves all the results into a single object +func (c ManagedClustersClient) ListMeshRevisionProfilesComplete(ctx context.Context, id LocationId) (ListMeshRevisionProfilesCompleteResult, error) { + return c.ListMeshRevisionProfilesCompleteMatchingPredicate(ctx, id, MeshRevisionProfileOperationPredicate{}) +} + +// ListMeshRevisionProfilesCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c ManagedClustersClient) ListMeshRevisionProfilesCompleteMatchingPredicate(ctx context.Context, id 
LocationId, predicate MeshRevisionProfileOperationPredicate) (result ListMeshRevisionProfilesCompleteResult, err error) { + items := make([]MeshRevisionProfile, 0) + + resp, err := c.ListMeshRevisionProfiles(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListMeshRevisionProfilesCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_listmeshupgradeprofiles.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_listmeshupgradeprofiles.go new file mode 100644 index 00000000000..b73c93f81a9 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_listmeshupgradeprofiles.go @@ -0,0 +1,106 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListMeshUpgradeProfilesOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]MeshUpgradeProfile +} + +type ListMeshUpgradeProfilesCompleteResult struct { + LatestHttpResponse *http.Response + Items []MeshUpgradeProfile +} + +type ListMeshUpgradeProfilesCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListMeshUpgradeProfilesCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListMeshUpgradeProfiles ... 
+func (c ManagedClustersClient) ListMeshUpgradeProfiles(ctx context.Context, id commonids.KubernetesClusterId) (result ListMeshUpgradeProfilesOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListMeshUpgradeProfilesCustomPager{}, + Path: fmt.Sprintf("%s/meshUpgradeProfiles", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]MeshUpgradeProfile `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListMeshUpgradeProfilesComplete retrieves all the results into a single object +func (c ManagedClustersClient) ListMeshUpgradeProfilesComplete(ctx context.Context, id commonids.KubernetesClusterId) (ListMeshUpgradeProfilesCompleteResult, error) { + return c.ListMeshUpgradeProfilesCompleteMatchingPredicate(ctx, id, MeshUpgradeProfileOperationPredicate{}) +} + +// ListMeshUpgradeProfilesCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c ManagedClustersClient) ListMeshUpgradeProfilesCompleteMatchingPredicate(ctx context.Context, id commonids.KubernetesClusterId, predicate MeshUpgradeProfileOperationPredicate) (result ListMeshUpgradeProfilesCompleteResult, err error) { + items := make([]MeshUpgradeProfile, 0) + + resp, err := c.ListMeshUpgradeProfiles(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = 
ListMeshUpgradeProfilesCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_listoutboundnetworkdependenciesendpoints.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_listoutboundnetworkdependenciesendpoints.go new file mode 100644 index 00000000000..21283e57ea3 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_listoutboundnetworkdependenciesendpoints.go @@ -0,0 +1,106 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListOutboundNetworkDependenciesEndpointsOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]OutboundEnvironmentEndpoint +} + +type ListOutboundNetworkDependenciesEndpointsCompleteResult struct { + LatestHttpResponse *http.Response + Items []OutboundEnvironmentEndpoint +} + +type ListOutboundNetworkDependenciesEndpointsCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListOutboundNetworkDependenciesEndpointsCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListOutboundNetworkDependenciesEndpoints ... 
+func (c ManagedClustersClient) ListOutboundNetworkDependenciesEndpoints(ctx context.Context, id commonids.KubernetesClusterId) (result ListOutboundNetworkDependenciesEndpointsOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListOutboundNetworkDependenciesEndpointsCustomPager{}, + Path: fmt.Sprintf("%s/outboundNetworkDependenciesEndpoints", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]OutboundEnvironmentEndpoint `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListOutboundNetworkDependenciesEndpointsComplete retrieves all the results into a single object +func (c ManagedClustersClient) ListOutboundNetworkDependenciesEndpointsComplete(ctx context.Context, id commonids.KubernetesClusterId) (ListOutboundNetworkDependenciesEndpointsCompleteResult, error) { + return c.ListOutboundNetworkDependenciesEndpointsCompleteMatchingPredicate(ctx, id, OutboundEnvironmentEndpointOperationPredicate{}) +} + +// ListOutboundNetworkDependenciesEndpointsCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c ManagedClustersClient) ListOutboundNetworkDependenciesEndpointsCompleteMatchingPredicate(ctx context.Context, id commonids.KubernetesClusterId, predicate OutboundEnvironmentEndpointOperationPredicate) (result ListOutboundNetworkDependenciesEndpointsCompleteResult, err error) { + items := make([]OutboundEnvironmentEndpoint, 0) + + resp, err := c.ListOutboundNetworkDependenciesEndpoints(ctx, id) + if err != nil { + result.LatestHttpResponse = 
resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListOutboundNetworkDependenciesEndpointsCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_resetaadprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_resetaadprofile.go new file mode 100644 index 00000000000..abdbb59c018 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_resetaadprofile.go @@ -0,0 +1,75 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResetAADProfileOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// ResetAADProfile ... 
+func (c ManagedClustersClient) ResetAADProfile(ctx context.Context, id commonids.KubernetesClusterId, input ManagedClusterAADProfile) (result ResetAADProfileOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/resetAADProfile", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ResetAADProfileThenPoll performs ResetAADProfile then polls until it's completed +func (c ManagedClustersClient) ResetAADProfileThenPoll(ctx context.Context, id commonids.KubernetesClusterId, input ManagedClusterAADProfile) error { + result, err := c.ResetAADProfile(ctx, id, input) + if err != nil { + return fmt.Errorf("performing ResetAADProfile: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ResetAADProfile: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_resetserviceprincipalprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_resetserviceprincipalprofile.go new file mode 100644 index 00000000000..54589acaf03 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_resetserviceprincipalprofile.go @@ -0,0 +1,75 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResetServicePrincipalProfileOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// ResetServicePrincipalProfile ... +func (c ManagedClustersClient) ResetServicePrincipalProfile(ctx context.Context, id commonids.KubernetesClusterId, input ManagedClusterServicePrincipalProfile) (result ResetServicePrincipalProfileOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/resetServicePrincipalProfile", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ResetServicePrincipalProfileThenPoll performs ResetServicePrincipalProfile then polls until it's completed +func (c ManagedClustersClient) ResetServicePrincipalProfileThenPoll(ctx context.Context, id commonids.KubernetesClusterId, input ManagedClusterServicePrincipalProfile) error { + result, err := c.ResetServicePrincipalProfile(ctx, id, input) + if err != nil { + return fmt.Errorf("performing ResetServicePrincipalProfile: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after 
ResetServicePrincipalProfile: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_rotateclustercertificates.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_rotateclustercertificates.go new file mode 100644 index 00000000000..3d04938f645 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_rotateclustercertificates.go @@ -0,0 +1,71 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RotateClusterCertificatesOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// RotateClusterCertificates ... 
+func (c ManagedClustersClient) RotateClusterCertificates(ctx context.Context, id commonids.KubernetesClusterId) (result RotateClusterCertificatesOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/rotateClusterCertificates", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// RotateClusterCertificatesThenPoll performs RotateClusterCertificates then polls until it's completed +func (c ManagedClustersClient) RotateClusterCertificatesThenPoll(ctx context.Context, id commonids.KubernetesClusterId) error { + result, err := c.RotateClusterCertificates(ctx, id) + if err != nil { + return fmt.Errorf("performing RotateClusterCertificates: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after RotateClusterCertificates: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_rotateserviceaccountsigningkeys.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_rotateserviceaccountsigningkeys.go new file mode 100644 index 00000000000..b4cecc16d85 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_rotateserviceaccountsigningkeys.go @@ -0,0 +1,71 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RotateServiceAccountSigningKeysOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// RotateServiceAccountSigningKeys ... +func (c ManagedClustersClient) RotateServiceAccountSigningKeys(ctx context.Context, id commonids.KubernetesClusterId) (result RotateServiceAccountSigningKeysOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/rotateServiceAccountSigningKeys", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// RotateServiceAccountSigningKeysThenPoll performs RotateServiceAccountSigningKeys then polls until it's completed +func (c ManagedClustersClient) RotateServiceAccountSigningKeysThenPoll(ctx context.Context, id commonids.KubernetesClusterId) error { + result, err := c.RotateServiceAccountSigningKeys(ctx, id) + if err != nil { + return fmt.Errorf("performing RotateServiceAccountSigningKeys: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after RotateServiceAccountSigningKeys: %+v", err) + } + + return nil +} diff --git 
a/resource-manager/containerservice/2025-05-01/managedclusters/method_runcommand.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_runcommand.go new file mode 100644 index 00000000000..b64b4ec1b20 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_runcommand.go @@ -0,0 +1,76 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RunCommandOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *RunCommandResult +} + +// RunCommand ... 
+func (c ManagedClustersClient) RunCommand(ctx context.Context, id commonids.KubernetesClusterId, input RunCommandRequest) (result RunCommandOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/runCommand", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// RunCommandThenPoll performs RunCommand then polls until it's completed +func (c ManagedClustersClient) RunCommandThenPoll(ctx context.Context, id commonids.KubernetesClusterId, input RunCommandRequest) error { + result, err := c.RunCommand(ctx, id, input) + if err != nil { + return fmt.Errorf("performing RunCommand: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after RunCommand: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_start.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_start.go new file mode 100644 index 00000000000..ba4549e7615 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_start.go @@ -0,0 +1,71 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type StartOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Start ... +func (c ManagedClustersClient) Start(ctx context.Context, id commonids.KubernetesClusterId) (result StartOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/start", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// StartThenPoll performs Start then polls until it's completed +func (c ManagedClustersClient) StartThenPoll(ctx context.Context, id commonids.KubernetesClusterId) error { + result, err := c.Start(ctx, id) + if err != nil { + return fmt.Errorf("performing Start: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Start: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_stop.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_stop.go new file mode 100644 index 00000000000..3bec609e3b4 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/method_stop.go @@ -0,0 +1,71 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + 
"github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type StopOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Stop ... +func (c ManagedClustersClient) Stop(ctx context.Context, id commonids.KubernetesClusterId) (result StopOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/stop", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// StopThenPoll performs Stop then polls until it's completed +func (c ManagedClustersClient) StopThenPoll(ctx context.Context, id commonids.KubernetesClusterId) error { + result, err := c.Stop(ctx, id) + if err != nil { + return fmt.Errorf("performing Stop: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Stop: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/method_updatetags.go b/resource-manager/containerservice/2025-05-01/managedclusters/method_updatetags.go new file mode 100644 index 00000000000..ef3920b358e --- /dev/null +++ 
b/resource-manager/containerservice/2025-05-01/managedclusters/method_updatetags.go @@ -0,0 +1,104 @@ +package managedclusters + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpdateTagsOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *ManagedCluster +} + +type UpdateTagsOperationOptions struct { + IfMatch *string +} + +func DefaultUpdateTagsOperationOptions() UpdateTagsOperationOptions { + return UpdateTagsOperationOptions{} +} + +func (o UpdateTagsOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + if o.IfMatch != nil { + out.Append("If-Match", fmt.Sprintf("%v", *o.IfMatch)) + } + return &out +} + +func (o UpdateTagsOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o UpdateTagsOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + + return &out +} + +// UpdateTags ... 
+func (c ManagedClustersClient) UpdateTags(ctx context.Context, id commonids.KubernetesClusterId, input TagsObject, options UpdateTagsOperationOptions) (result UpdateTagsOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UpdateTagsThenPoll performs UpdateTags then polls until it's completed +func (c ManagedClustersClient) UpdateTagsThenPoll(ctx context.Context, id commonids.KubernetesClusterId, input TagsObject, options UpdateTagsOperationOptions) error { + result, err := c.UpdateTags(ctx, id, input, options) + if err != nil { + return fmt.Errorf("performing UpdateTags: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after UpdateTags: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_accessprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_accessprofile.go new file mode 100644 index 00000000000..c13b64ab25d --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_accessprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AccessProfile struct { + KubeConfig *string `json:"kubeConfig,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_advancednetworking.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_advancednetworking.go new file mode 100644 index 00000000000..48c6487fdee --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_advancednetworking.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AdvancedNetworking struct { + Enabled *bool `json:"enabled,omitempty"` + Observability *AdvancedNetworkingObservability `json:"observability,omitempty"` + Security *AdvancedNetworkingSecurity `json:"security,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_advancednetworkingobservability.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_advancednetworkingobservability.go new file mode 100644 index 00000000000..a491cdc7a17 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_advancednetworkingobservability.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// AdvancedNetworkingObservability holds the enable flag for the advanced
// networking observability feature.
type AdvancedNetworkingObservability struct {
	Enabled *bool `json:"enabled,omitempty"`
}

// AdvancedNetworkingSecurity holds the enable flag for the advanced
// networking security feature.
type AdvancedNetworkingSecurity struct {
	Enabled *bool `json:"enabled,omitempty"`
}

// AgentPoolGatewayProfile configures the public IP prefix size for a
// gateway-mode agent pool.
type AgentPoolGatewayProfile struct {
	PublicIPPrefixSize *int64 `json:"publicIPPrefixSize,omitempty"`
}
// AgentPoolNetworkProfile carries per-agent-pool network settings: allowed
// host port ranges, application security group IDs, and IP tags applied to
// node public IPs.
type AgentPoolNetworkProfile struct {
	AllowedHostPorts          *[]PortRange `json:"allowedHostPorts,omitempty"`
	ApplicationSecurityGroups *[]string    `json:"applicationSecurityGroups,omitempty"`
	NodePublicIPTags          *[]IPTag     `json:"nodePublicIPTags,omitempty"`
}

// AgentPoolSecurityProfile toggles VM security features (secure boot, vTPM)
// for the nodes in an agent pool.
type AgentPoolSecurityProfile struct {
	EnableSecureBoot *bool `json:"enableSecureBoot,omitempty"`
	EnableVTPM       *bool `json:"enableVTPM,omitempty"`
}
// AgentPoolStatus surfaces the most recent provisioning error for an agent
// pool, if any.
type AgentPoolStatus struct {
	ProvisioningError *CloudErrorBody `json:"provisioningError,omitempty"`
}

// AgentPoolUpgradeSettings controls how nodes are drained and replaced
// during an agent pool upgrade. MaxSurge/MaxUnavailable are strings —
// presumably because the API accepts either a node count or a percentage;
// confirm against the service docs.
type AgentPoolUpgradeSettings struct {
	DrainTimeoutInMinutes     *int64                   `json:"drainTimeoutInMinutes,omitempty"`
	MaxSurge                  *string                  `json:"maxSurge,omitempty"`
	MaxUnavailable            *string                  `json:"maxUnavailable,omitempty"`
	NodeSoakDurationInMinutes *int64                   `json:"nodeSoakDurationInMinutes,omitempty"`
	UndrainableNodeBehavior   *UndrainableNodeBehavior `json:"undrainableNodeBehavior,omitempty"`
}
// AgentPoolWindowsProfile holds Windows-specific agent pool settings.
type AgentPoolWindowsProfile struct {
	DisableOutboundNat *bool `json:"disableOutboundNat,omitempty"`
}

// AzureKeyVaultKms configures Azure Key Vault based key management service
// (KMS) encryption: the key/vault identifiers and the vault network access
// mode.
type AzureKeyVaultKms struct {
	Enabled               *bool                       `json:"enabled,omitempty"`
	KeyId                 *string                     `json:"keyId,omitempty"`
	KeyVaultNetworkAccess *KeyVaultNetworkAccessTypes `json:"keyVaultNetworkAccess,omitempty"`
	KeyVaultResourceId    *string                     `json:"keyVaultResourceId,omitempty"`
}
// CloudErrorBody is the standard ARM error shape: a code/message pair with
// an optional target and a recursive list of nested detail errors.
type CloudErrorBody struct {
	Code    *string           `json:"code,omitempty"`
	Details *[]CloudErrorBody `json:"details,omitempty"`
	Message *string           `json:"message,omitempty"`
	Target  *string           `json:"target,omitempty"`
}

// ClusterUpgradeSettings wraps the override settings applied during cluster
// upgrades.
type ClusterUpgradeSettings struct {
	OverrideSettings *UpgradeOverrideSettings `json:"overrideSettings,omitempty"`
}
+ +type CommandResultProperties struct { + ExitCode *int64 `json:"exitCode,omitempty"` + FinishedAt *string `json:"finishedAt,omitempty"` + Logs *string `json:"logs,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + Reason *string `json:"reason,omitempty"` + StartedAt *string `json:"startedAt,omitempty"` +} + +func (o *CommandResultProperties) GetFinishedAtAsTime() (*time.Time, error) { + if o.FinishedAt == nil { + return nil, nil + } + return dates.ParseAsFormat(o.FinishedAt, "2006-01-02T15:04:05Z07:00") +} + +func (o *CommandResultProperties) SetFinishedAtAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.FinishedAt = &formatted +} + +func (o *CommandResultProperties) GetStartedAtAsTime() (*time.Time, error) { + if o.StartedAt == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedAt, "2006-01-02T15:04:05Z07:00") +} + +func (o *CommandResultProperties) SetStartedAtAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedAt = &formatted +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_compatibleversions.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_compatibleversions.go new file mode 100644 index 00000000000..585f8e04d7b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_compatibleversions.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// CompatibleVersions pairs a component name with the list of versions it is
// compatible with.
type CompatibleVersions struct {
	Name     *string   `json:"name,omitempty"`
	Versions *[]string `json:"versions,omitempty"`
}

// ContainerServiceLinuxProfile describes the Linux admin account and SSH
// configuration for cluster nodes; both fields are required by the API
// (note: no omitempty).
type ContainerServiceLinuxProfile struct {
	AdminUsername string                           `json:"adminUsername"`
	Ssh           ContainerServiceSshConfiguration `json:"ssh"`
}
// ContainerServiceNetworkProfile is the cluster-wide network configuration:
// CNI plugin/policy/dataplane selection, service and pod CIDR ranges (the
// plural *s fields support dual-stack alongside the singular legacy fields),
// load balancer / NAT gateway profiles and the outbound routing type.
type ContainerServiceNetworkProfile struct {
	AdvancedNetworking         *AdvancedNetworking                       `json:"advancedNetworking,omitempty"`
	DnsServiceIP               *string                                   `json:"dnsServiceIP,omitempty"`
	IPFamilies                 *[]IPFamily                               `json:"ipFamilies,omitempty"`
	LoadBalancerProfile        *ManagedClusterLoadBalancerProfile        `json:"loadBalancerProfile,omitempty"`
	LoadBalancerSku            *LoadBalancerSku                          `json:"loadBalancerSku,omitempty"`
	NatGatewayProfile          *ManagedClusterNATGatewayProfile          `json:"natGatewayProfile,omitempty"`
	NetworkDataplane           *NetworkDataplane                         `json:"networkDataplane,omitempty"`
	NetworkMode                *NetworkMode                              `json:"networkMode,omitempty"`
	NetworkPlugin              *NetworkPlugin                            `json:"networkPlugin,omitempty"`
	NetworkPluginMode          *NetworkPluginMode                        `json:"networkPluginMode,omitempty"`
	NetworkPolicy              *NetworkPolicy                            `json:"networkPolicy,omitempty"`
	OutboundType               *OutboundType                             `json:"outboundType,omitempty"`
	PodCidr                    *string                                   `json:"podCidr,omitempty"`
	PodCidrs                   *[]string                                 `json:"podCidrs,omitempty"`
	ServiceCidr                *string                                   `json:"serviceCidr,omitempty"`
	ServiceCidrs               *[]string                                 `json:"serviceCidrs,omitempty"`
	StaticEgressGatewayProfile *ManagedClusterStaticEgressGatewayProfile `json:"staticEgressGatewayProfile,omitempty"`
}
// ContainerServiceSshConfiguration lists the SSH public keys installed on
// the cluster's Linux nodes (required field — no omitempty).
type ContainerServiceSshConfiguration struct {
	PublicKeys []ContainerServiceSshPublicKey `json:"publicKeys"`
}

// ContainerServiceSshPublicKey wraps a single SSH public key value.
type ContainerServiceSshPublicKey struct {
	KeyData string `json:"keyData"`
}

// CreationData references the source resource an object was created from.
type CreationData struct {
	SourceResourceId *string `json:"sourceResourceId,omitempty"`
}
// CredentialResult is one named credential entry; Value holds the credential
// contents (used for kubeconfigs — see CredentialResults.Kubeconfigs).
type CredentialResult struct {
	Name  *string `json:"name,omitempty"`
	Value *string `json:"value,omitempty"`
}

// CredentialResults is the list-credentials response: a set of kubeconfigs.
type CredentialResults struct {
	Kubeconfigs *[]CredentialResult `json:"kubeconfigs,omitempty"`
}

// EndpointDependency names an outbound network dependency (a domain)
// together with its concrete endpoint details.
type EndpointDependency struct {
	DomainName      *string           `json:"domainName,omitempty"`
	EndpointDetails *[]EndpointDetail `json:"endpointDetails,omitempty"`
}
// EndpointDetail describes one concrete endpoint of an outbound dependency:
// address, port, protocol and a free-form description.
type EndpointDetail struct {
	Description *string `json:"description,omitempty"`
	IPAddress   *string `json:"ipAddress,omitempty"`
	Port        *int64  `json:"port,omitempty"`
	Protocol    *string `json:"protocol,omitempty"`
}

// GPUProfile selects the GPU driver configuration for a pool.
type GPUProfile struct {
	Driver *GPUDriver `json:"driver,omitempty"`
}

// IPTag is a type/value tag attached to a public IP.
type IPTag struct {
	IPTagType *string `json:"ipTagType,omitempty"`
	Tag       *string `json:"tag,omitempty"`
}
// IstioCertificateAuthority configures the certificate authority for the
// Istio service mesh add-on; only a plugin (customer-managed) CA is modelled.
type IstioCertificateAuthority struct {
	Plugin *IstioPluginCertificateAuthority `json:"plugin,omitempty"`
}

// IstioComponents lists the mesh's egress and ingress gateways.
type IstioComponents struct {
	EgressGateways  *[]IstioEgressGateway  `json:"egressGateways,omitempty"`
	IngressGateways *[]IstioIngressGateway `json:"ingressGateways,omitempty"`
}
// IstioEgressGateway enables or disables an Istio egress gateway (required
// field — no omitempty).
type IstioEgressGateway struct {
	Enabled bool `json:"enabled"`
}

// IstioIngressGateway enables an Istio ingress gateway in a given mode; both
// fields are required by the API.
type IstioIngressGateway struct {
	Enabled bool                    `json:"enabled"`
	Mode    IstioIngressGatewayMode `json:"mode"`
}
// IstioPluginCertificateAuthority points the mesh at customer-managed CA
// material stored in an Azure Key Vault: the vault resource ID plus the
// object names of the cert, chain, key and root cert.
type IstioPluginCertificateAuthority struct {
	CertChainObjectName *string `json:"certChainObjectName,omitempty"`
	CertObjectName      *string `json:"certObjectName,omitempty"`
	KeyObjectName       *string `json:"keyObjectName,omitempty"`
	KeyVaultId          *string `json:"keyVaultId,omitempty"`
	RootCertObjectName  *string `json:"rootCertObjectName,omitempty"`
}

// IstioServiceMesh is the Istio flavour of the service mesh profile: CA
// settings, gateway components and the list of deployed revisions.
type IstioServiceMesh struct {
	CertificateAuthority *IstioCertificateAuthority `json:"certificateAuthority,omitempty"`
	Components           *IstioComponents           `json:"components,omitempty"`
	Revisions            *[]string                  `json:"revisions,omitempty"`
}
// KubeletConfig exposes the tunable kubelet settings for agent pool nodes
// (CPU manager/CFS quota, image GC thresholds, container log rotation,
// sysctl allow-list, swap and pod PID limits).
type KubeletConfig struct {
	AllowedUnsafeSysctls  *[]string `json:"allowedUnsafeSysctls,omitempty"`
	ContainerLogMaxFiles  *int64    `json:"containerLogMaxFiles,omitempty"`
	ContainerLogMaxSizeMB *int64    `json:"containerLogMaxSizeMB,omitempty"`
	CpuCfsQuota           *bool     `json:"cpuCfsQuota,omitempty"`
	CpuCfsQuotaPeriod     *string   `json:"cpuCfsQuotaPeriod,omitempty"`
	CpuManagerPolicy      *string   `json:"cpuManagerPolicy,omitempty"`
	FailSwapOn            *bool     `json:"failSwapOn,omitempty"`
	ImageGcHighThreshold  *int64    `json:"imageGcHighThreshold,omitempty"`
	ImageGcLowThreshold   *int64    `json:"imageGcLowThreshold,omitempty"`
	PodMaxPids            *int64    `json:"podMaxPids,omitempty"`
	TopologyManagerPolicy *string   `json:"topologyManagerPolicy,omitempty"`
}

// KubernetesPatchVersion lists the versions a given patch release can be
// upgraded to.
type KubernetesPatchVersion struct {
	Upgrades *[]string `json:"upgrades,omitempty"`
}
// KubernetesVersion describes one Kubernetes release: its capabilities,
// default/preview flags and per-patch upgrade information keyed by patch
// version string.
type KubernetesVersion struct {
	Capabilities  *KubernetesVersionCapabilities     `json:"capabilities,omitempty"`
	IsDefault     *bool                              `json:"isDefault,omitempty"`
	IsPreview     *bool                              `json:"isPreview,omitempty"`
	PatchVersions *map[string]KubernetesPatchVersion `json:"patchVersions,omitempty"`
	Version       *string                            `json:"version,omitempty"`
}

// KubernetesVersionCapabilities lists the support plans available for a
// Kubernetes version.
type KubernetesVersionCapabilities struct {
	SupportPlan *[]KubernetesSupportPlan `json:"supportPlan,omitempty"`
}
// KubernetesVersionListResult is the response wrapper for listing the
// available Kubernetes versions.
type KubernetesVersionListResult struct {
	Values *[]KubernetesVersion `json:"values,omitempty"`
}

// LinuxOSConfig holds OS-level tuning for Linux nodes: swap file size,
// sysctl settings and transparent huge page behaviour.
type LinuxOSConfig struct {
	SwapFileSizeMB             *int64        `json:"swapFileSizeMB,omitempty"`
	Sysctls                    *SysctlConfig `json:"sysctls,omitempty"`
	TransparentHugePageDefrag  *string       `json:"transparentHugePageDefrag,omitempty"`
	TransparentHugePageEnabled *string       `json:"transparentHugePageEnabled,omitempty"`
}
// ManagedCluster is the top-level managed cluster tracked resource: the ARM
// envelope (Id/Name/Type/Location/Tags/SystemData/ETag), extended location,
// identity and SKU, with the cluster configuration under Properties.
// Location is the only required field (no omitempty).
type ManagedCluster struct {
	ETag             *string                            `json:"eTag,omitempty"`
	ExtendedLocation *edgezones.Model                   `json:"extendedLocation,omitempty"`
	Id               *string                            `json:"id,omitempty"`
	Identity         *identity.SystemOrUserAssignedMap  `json:"identity,omitempty"`
	Location         string                             `json:"location"`
	Name             *string                            `json:"name,omitempty"`
	Properties       *ManagedClusterProperties          `json:"properties,omitempty"`
	Sku              *ManagedClusterSKU                 `json:"sku,omitempty"`
	SystemData       *systemdata.SystemData             `json:"systemData,omitempty"`
	Tags             *map[string]string                 `json:"tags,omitempty"`
	Type             *string                            `json:"type,omitempty"`
}
// ManagedClusterAADProfile configures Azure Active Directory integration.
// The ClientAppID/ServerAppID/ServerAppSecret fields belong to the legacy
// (non-managed) AAD flow — presumably deprecated in favour of Managed/
// EnableAzureRBAC; confirm against the service documentation.
type ManagedClusterAADProfile struct {
	AdminGroupObjectIDs *[]string `json:"adminGroupObjectIDs,omitempty"`
	ClientAppID         *string   `json:"clientAppID,omitempty"`
	EnableAzureRBAC     *bool     `json:"enableAzureRBAC,omitempty"`
	Managed             *bool     `json:"managed,omitempty"`
	ServerAppID         *string   `json:"serverAppID,omitempty"`
	ServerAppSecret     *string   `json:"serverAppSecret,omitempty"`
	TenantID            *string   `json:"tenantID,omitempty"`
}

// ManagedClusterAccessProfile is a tracked-resource wrapper whose Properties
// carry an AccessProfile (kubeconfig). Location is required (no omitempty).
type ManagedClusterAccessProfile struct {
	Id         *string                `json:"id,omitempty"`
	Location   string                 `json:"location"`
	Name       *string                `json:"name,omitempty"`
	Properties *AccessProfile         `json:"properties,omitempty"`
	SystemData *systemdata.SystemData `json:"systemData,omitempty"`
	Tags       *map[string]string     `json:"tags,omitempty"`
	Type       *string                `json:"type,omitempty"`
}
See NOTICE.txt in the project root for license information. + +type ManagedClusterAddonProfile struct { + Config *map[string]string `json:"config,omitempty"` + Enabled bool `json:"enabled"` + Identity *UserAssignedIdentity `json:"identity,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteragentpoolprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteragentpoolprofile.go new file mode 100644 index 00000000000..8270edcd22d --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteragentpoolprofile.go @@ -0,0 +1,65 @@ +package managedclusters + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/zones" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterAgentPoolProfile struct { + AvailabilityZones *zones.Schema `json:"availabilityZones,omitempty"` + CapacityReservationGroupID *string `json:"capacityReservationGroupID,omitempty"` + Count *int64 `json:"count,omitempty"` + CreationData *CreationData `json:"creationData,omitempty"` + CurrentOrchestratorVersion *string `json:"currentOrchestratorVersion,omitempty"` + ETag *string `json:"eTag,omitempty"` + EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"` + EnableEncryptionAtHost *bool `json:"enableEncryptionAtHost,omitempty"` + EnableFIPS *bool `json:"enableFIPS,omitempty"` + EnableNodePublicIP *bool `json:"enableNodePublicIP,omitempty"` + EnableUltraSSD *bool `json:"enableUltraSSD,omitempty"` + GatewayProfile *AgentPoolGatewayProfile `json:"gatewayProfile,omitempty"` + GpuInstanceProfile *GPUInstanceProfile `json:"gpuInstanceProfile,omitempty"` + GpuProfile *GPUProfile `json:"gpuProfile,omitempty"` + HostGroupID *string `json:"hostGroupID,omitempty"` + KubeletConfig *KubeletConfig `json:"kubeletConfig,omitempty"` + 
KubeletDiskType *KubeletDiskType `json:"kubeletDiskType,omitempty"` + LinuxOSConfig *LinuxOSConfig `json:"linuxOSConfig,omitempty"` + MaxCount *int64 `json:"maxCount,omitempty"` + MaxPods *int64 `json:"maxPods,omitempty"` + MessageOfTheDay *string `json:"messageOfTheDay,omitempty"` + MinCount *int64 `json:"minCount,omitempty"` + Mode *AgentPoolMode `json:"mode,omitempty"` + Name string `json:"name"` + NetworkProfile *AgentPoolNetworkProfile `json:"networkProfile,omitempty"` + NodeImageVersion *string `json:"nodeImageVersion,omitempty"` + NodeLabels *map[string]string `json:"nodeLabels,omitempty"` + NodePublicIPPrefixID *string `json:"nodePublicIPPrefixID,omitempty"` + NodeTaints *[]string `json:"nodeTaints,omitempty"` + OrchestratorVersion *string `json:"orchestratorVersion,omitempty"` + OsDiskSizeGB *int64 `json:"osDiskSizeGB,omitempty"` + OsDiskType *OSDiskType `json:"osDiskType,omitempty"` + OsSKU *OSSKU `json:"osSKU,omitempty"` + OsType *OSType `json:"osType,omitempty"` + PodIPAllocationMode *PodIPAllocationMode `json:"podIPAllocationMode,omitempty"` + PodSubnetID *string `json:"podSubnetID,omitempty"` + PowerState *PowerState `json:"powerState,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + ProximityPlacementGroupID *string `json:"proximityPlacementGroupID,omitempty"` + ScaleDownMode *ScaleDownMode `json:"scaleDownMode,omitempty"` + ScaleSetEvictionPolicy *ScaleSetEvictionPolicy `json:"scaleSetEvictionPolicy,omitempty"` + ScaleSetPriority *ScaleSetPriority `json:"scaleSetPriority,omitempty"` + SecurityProfile *AgentPoolSecurityProfile `json:"securityProfile,omitempty"` + SpotMaxPrice *float64 `json:"spotMaxPrice,omitempty"` + Status *AgentPoolStatus `json:"status,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *AgentPoolType `json:"type,omitempty"` + UpgradeSettings *AgentPoolUpgradeSettings `json:"upgradeSettings,omitempty"` + VMSize *string `json:"vmSize,omitempty"` + VirtualMachineNodesStatus 
*[]VirtualMachineNodes `json:"virtualMachineNodesStatus,omitempty"` + VirtualMachinesProfile *VirtualMachinesProfile `json:"virtualMachinesProfile,omitempty"` + VnetSubnetID *string `json:"vnetSubnetID,omitempty"` + WindowsProfile *AgentPoolWindowsProfile `json:"windowsProfile,omitempty"` + WorkloadRuntime *WorkloadRuntime `json:"workloadRuntime,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteraitoolchainoperatorprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteraitoolchainoperatorprofile.go new file mode 100644 index 00000000000..0c2d87296e0 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteraitoolchainoperatorprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterAIToolchainOperatorProfile struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterapiserveraccessprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterapiserveraccessprofile.go new file mode 100644 index 00000000000..69b53ef85a7 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterapiserveraccessprofile.go @@ -0,0 +1,14 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterAPIServerAccessProfile struct { + AuthorizedIPRanges *[]string `json:"authorizedIPRanges,omitempty"` + DisableRunCommand *bool `json:"disableRunCommand,omitempty"` + EnablePrivateCluster *bool `json:"enablePrivateCluster,omitempty"` + EnablePrivateClusterPublicFQDN *bool `json:"enablePrivateClusterPublicFQDN,omitempty"` + EnableVnetIntegration *bool `json:"enableVnetIntegration,omitempty"` + PrivateDNSZone *string `json:"privateDNSZone,omitempty"` + SubnetId *string `json:"subnetId,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterautoupgradeprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterautoupgradeprofile.go new file mode 100644 index 00000000000..e43a6df257f --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterautoupgradeprofile.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterAutoUpgradeProfile struct { + NodeOSUpgradeChannel *NodeOSUpgradeChannel `json:"nodeOSUpgradeChannel,omitempty"` + UpgradeChannel *UpgradeChannel `json:"upgradeChannel,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterazuremonitorprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterazuremonitorprofile.go new file mode 100644 index 00000000000..8a18140e45a --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterazuremonitorprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterAzureMonitorProfile struct { + Metrics *ManagedClusterAzureMonitorProfileMetrics `json:"metrics,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterazuremonitorprofilekubestatemetrics.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterazuremonitorprofilekubestatemetrics.go new file mode 100644 index 00000000000..ef0fb9066e2 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterazuremonitorprofilekubestatemetrics.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterAzureMonitorProfileKubeStateMetrics struct { + MetricAnnotationsAllowList *string `json:"metricAnnotationsAllowList,omitempty"` + MetricLabelsAllowlist *string `json:"metricLabelsAllowlist,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterazuremonitorprofilemetrics.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterazuremonitorprofilemetrics.go new file mode 100644 index 00000000000..5c173441517 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterazuremonitorprofilemetrics.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterAzureMonitorProfileMetrics struct { + Enabled bool `json:"enabled"` + KubeStateMetrics *ManagedClusterAzureMonitorProfileKubeStateMetrics `json:"kubeStateMetrics,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterbootstrapprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterbootstrapprofile.go new file mode 100644 index 00000000000..5515399cb05 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterbootstrapprofile.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterBootstrapProfile struct { + ArtifactSource *ArtifactSource `json:"artifactSource,omitempty"` + ContainerRegistryId *string `json:"containerRegistryId,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustercostanalysis.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustercostanalysis.go new file mode 100644 index 00000000000..a54a571fe4e --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustercostanalysis.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterCostAnalysis struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterhttpproxyconfig.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterhttpproxyconfig.go new file mode 100644 index 00000000000..33161aad969 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterhttpproxyconfig.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterHTTPProxyConfig struct { + HTTPProxy *string `json:"httpProxy,omitempty"` + HTTPSProxy *string `json:"httpsProxy,omitempty"` + NoProxy *[]string `json:"noProxy,omitempty"` + TrustedCa *string `json:"trustedCa,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteringressprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteringressprofile.go new file mode 100644 index 00000000000..ef54c8e8069 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteringressprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterIngressProfile struct { + WebAppRouting *ManagedClusterIngressProfileWebAppRouting `json:"webAppRouting,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteringressprofilenginx.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteringressprofilenginx.go new file mode 100644 index 00000000000..60f57dd6a49 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteringressprofilenginx.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterIngressProfileNginx struct { + DefaultIngressControllerType *NginxIngressControllerType `json:"defaultIngressControllerType,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteringressprofilewebapprouting.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteringressprofilewebapprouting.go new file mode 100644 index 00000000000..9849e8ecc14 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteringressprofilewebapprouting.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterIngressProfileWebAppRouting struct { + DnsZoneResourceIds *[]string `json:"dnsZoneResourceIds,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Identity *UserAssignedIdentity `json:"identity,omitempty"` + Nginx *ManagedClusterIngressProfileNginx `json:"nginx,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterloadbalancerprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterloadbalancerprofile.go new file mode 100644 index 00000000000..0d2207851bd --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterloadbalancerprofile.go @@ -0,0 +1,15 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterLoadBalancerProfile struct { + AllocatedOutboundPorts *int64 `json:"allocatedOutboundPorts,omitempty"` + BackendPoolType *BackendPoolType `json:"backendPoolType,omitempty"` + EffectiveOutboundIPs *[]ResourceReference `json:"effectiveOutboundIPs,omitempty"` + EnableMultipleStandardLoadBalancers *bool `json:"enableMultipleStandardLoadBalancers,omitempty"` + IdleTimeoutInMinutes *int64 `json:"idleTimeoutInMinutes,omitempty"` + ManagedOutboundIPs *ManagedClusterLoadBalancerProfileManagedOutboundIPs `json:"managedOutboundIPs,omitempty"` + OutboundIPPrefixes *ManagedClusterLoadBalancerProfileOutboundIPPrefixes `json:"outboundIPPrefixes,omitempty"` + OutboundIPs *ManagedClusterLoadBalancerProfileOutboundIPs `json:"outboundIPs,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterloadbalancerprofilemanagedoutboundips.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterloadbalancerprofilemanagedoutboundips.go new file mode 100644 index 00000000000..b8d24680fed --- /dev/null +++ 
b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterloadbalancerprofilemanagedoutboundips.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterLoadBalancerProfileManagedOutboundIPs struct { + Count *int64 `json:"count,omitempty"` + CountIPv6 *int64 `json:"countIPv6,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterloadbalancerprofileoutboundipprefixes.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterloadbalancerprofileoutboundipprefixes.go new file mode 100644 index 00000000000..76893c76d85 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterloadbalancerprofileoutboundipprefixes.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterLoadBalancerProfileOutboundIPPrefixes struct { + PublicIPPrefixes *[]ResourceReference `json:"publicIPPrefixes,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterloadbalancerprofileoutboundips.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterloadbalancerprofileoutboundips.go new file mode 100644 index 00000000000..f0a60f5817e --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterloadbalancerprofileoutboundips.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterLoadBalancerProfileOutboundIPs struct { + PublicIPs *[]ResourceReference `json:"publicIPs,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustermanagedoutboundipprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustermanagedoutboundipprofile.go new file mode 100644 index 00000000000..ec079b6cdf4 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustermanagedoutboundipprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterManagedOutboundIPProfile struct { + Count *int64 `json:"count,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustermetricsprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustermetricsprofile.go new file mode 100644 index 00000000000..fd466216881 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustermetricsprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterMetricsProfile struct { + CostAnalysis *ManagedClusterCostAnalysis `json:"costAnalysis,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusternatgatewayprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusternatgatewayprofile.go new file mode 100644 index 00000000000..a7e37699c62 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusternatgatewayprofile.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterNATGatewayProfile struct { + EffectiveOutboundIPs *[]ResourceReference `json:"effectiveOutboundIPs,omitempty"` + IdleTimeoutInMinutes *int64 `json:"idleTimeoutInMinutes,omitempty"` + ManagedOutboundIPProfile *ManagedClusterManagedOutboundIPProfile `json:"managedOutboundIPProfile,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusternodeprovisioningprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusternodeprovisioningprofile.go new file mode 100644 index 00000000000..441f511c302 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusternodeprovisioningprofile.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterNodeProvisioningProfile struct { + DefaultNodePools *NodeProvisioningDefaultNodePools `json:"defaultNodePools,omitempty"` + Mode *NodeProvisioningMode `json:"mode,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusternoderesourcegroupprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusternoderesourcegroupprofile.go new file mode 100644 index 00000000000..1ae3147c267 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusternoderesourcegroupprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterNodeResourceGroupProfile struct { + RestrictionLevel *RestrictionLevel `json:"restrictionLevel,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteroidcissuerprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteroidcissuerprofile.go new file mode 100644 index 00000000000..b5d3be88f7d --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusteroidcissuerprofile.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterOIDCIssuerProfile struct { + Enabled *bool `json:"enabled,omitempty"` + IssuerURL *string `json:"issuerURL,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentity.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentity.go new file mode 100644 index 00000000000..3bfb6799645 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentity.go @@ -0,0 +1,13 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterPodIdentity struct { + BindingSelector *string `json:"bindingSelector,omitempty"` + Identity UserAssignedIdentity `json:"identity"` + Name string `json:"name"` + Namespace string `json:"namespace"` + ProvisioningInfo *ManagedClusterPodIdentityProvisioningInfo `json:"provisioningInfo,omitempty"` + ProvisioningState *ManagedClusterPodIdentityProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityexception.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityexception.go new file mode 100644 index 00000000000..28a710f9b5c --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityexception.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterPodIdentityException struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + PodLabels map[string]string `json:"podLabels"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityprofile.go new file mode 100644 index 00000000000..e32db30028a --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityprofile.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterPodIdentityProfile struct { + AllowNetworkPluginKubenet *bool `json:"allowNetworkPluginKubenet,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + UserAssignedIdentities *[]ManagedClusterPodIdentity `json:"userAssignedIdentities,omitempty"` + UserAssignedIdentityExceptions *[]ManagedClusterPodIdentityException `json:"userAssignedIdentityExceptions,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityprovisioningerror.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityprovisioningerror.go new file mode 100644 index 00000000000..1fb66d9034f --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityprovisioningerror.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterPodIdentityProvisioningError struct { + Error *ManagedClusterPodIdentityProvisioningErrorBody `json:"error,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityprovisioningerrorbody.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityprovisioningerrorbody.go new file mode 100644 index 00000000000..db4466f80bf --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityprovisioningerrorbody.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterPodIdentityProvisioningErrorBody struct { + Code *string `json:"code,omitempty"` + Details *[]ManagedClusterPodIdentityProvisioningErrorBody `json:"details,omitempty"` + Message *string `json:"message,omitempty"` + Target *string `json:"target,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityprovisioninginfo.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityprovisioninginfo.go new file mode 100644 index 00000000000..60464e39c82 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpodidentityprovisioninginfo.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterPodIdentityProvisioningInfo struct { + Error *ManagedClusterPodIdentityProvisioningError `json:"error,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpoolupgradeprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpoolupgradeprofile.go new file mode 100644 index 00000000000..7bafa5df972 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpoolupgradeprofile.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterPoolUpgradeProfile struct { + KubernetesVersion string `json:"kubernetesVersion"` + Name *string `json:"name,omitempty"` + OsType OSType `json:"osType"` + Upgrades *[]ManagedClusterPoolUpgradeProfileUpgradesInlined `json:"upgrades,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpoolupgradeprofileupgradesinlined.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpoolupgradeprofileupgradesinlined.go new file mode 100644 index 00000000000..ccc02dd9957 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpoolupgradeprofileupgradesinlined.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterPoolUpgradeProfileUpgradesInlined struct { + IsPreview *bool `json:"isPreview,omitempty"` + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterproperties.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterproperties.go new file mode 100644 index 00000000000..faa7ef5084e --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterproperties.go @@ -0,0 +1,52 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterProperties struct { + AadProfile *ManagedClusterAADProfile `json:"aadProfile,omitempty"` + AddonProfiles *map[string]ManagedClusterAddonProfile `json:"addonProfiles,omitempty"` + AgentPoolProfiles *[]ManagedClusterAgentPoolProfile `json:"agentPoolProfiles,omitempty"` + AiToolchainOperatorProfile *ManagedClusterAIToolchainOperatorProfile `json:"aiToolchainOperatorProfile,omitempty"` + ApiServerAccessProfile *ManagedClusterAPIServerAccessProfile `json:"apiServerAccessProfile,omitempty"` + AutoScalerProfile *ManagedClusterPropertiesAutoScalerProfile `json:"autoScalerProfile,omitempty"` + AutoUpgradeProfile *ManagedClusterAutoUpgradeProfile `json:"autoUpgradeProfile,omitempty"` + AzureMonitorProfile *ManagedClusterAzureMonitorProfile `json:"azureMonitorProfile,omitempty"` + AzurePortalFQDN *string `json:"azurePortalFQDN,omitempty"` + BootstrapProfile *ManagedClusterBootstrapProfile `json:"bootstrapProfile,omitempty"` + CurrentKubernetesVersion *string `json:"currentKubernetesVersion,omitempty"` + DisableLocalAccounts *bool `json:"disableLocalAccounts,omitempty"` + DiskEncryptionSetID *string `json:"diskEncryptionSetID,omitempty"` + DnsPrefix *string `json:"dnsPrefix,omitempty"` + EnableRBAC *bool 
`json:"enableRBAC,omitempty"` + Fqdn *string `json:"fqdn,omitempty"` + FqdnSubdomain *string `json:"fqdnSubdomain,omitempty"` + HTTPProxyConfig *ManagedClusterHTTPProxyConfig `json:"httpProxyConfig,omitempty"` + IdentityProfile *map[string]UserAssignedIdentity `json:"identityProfile,omitempty"` + IngressProfile *ManagedClusterIngressProfile `json:"ingressProfile,omitempty"` + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` + LinuxProfile *ContainerServiceLinuxProfile `json:"linuxProfile,omitempty"` + MaxAgentPools *int64 `json:"maxAgentPools,omitempty"` + MetricsProfile *ManagedClusterMetricsProfile `json:"metricsProfile,omitempty"` + NetworkProfile *ContainerServiceNetworkProfile `json:"networkProfile,omitempty"` + NodeProvisioningProfile *ManagedClusterNodeProvisioningProfile `json:"nodeProvisioningProfile,omitempty"` + NodeResourceGroup *string `json:"nodeResourceGroup,omitempty"` + NodeResourceGroupProfile *ManagedClusterNodeResourceGroupProfile `json:"nodeResourceGroupProfile,omitempty"` + OidcIssuerProfile *ManagedClusterOIDCIssuerProfile `json:"oidcIssuerProfile,omitempty"` + PodIdentityProfile *ManagedClusterPodIdentityProfile `json:"podIdentityProfile,omitempty"` + PowerState *PowerState `json:"powerState,omitempty"` + PrivateFQDN *string `json:"privateFQDN,omitempty"` + PrivateLinkResources *[]PrivateLinkResource `json:"privateLinkResources,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + PublicNetworkAccess *PublicNetworkAccess `json:"publicNetworkAccess,omitempty"` + ResourceUID *string `json:"resourceUID,omitempty"` + SecurityProfile *ManagedClusterSecurityProfile `json:"securityProfile,omitempty"` + ServiceMeshProfile *ServiceMeshProfile `json:"serviceMeshProfile,omitempty"` + ServicePrincipalProfile *ManagedClusterServicePrincipalProfile `json:"servicePrincipalProfile,omitempty"` + Status *ManagedClusterStatus `json:"status,omitempty"` + StorageProfile *ManagedClusterStorageProfile 
`json:"storageProfile,omitempty"` + SupportPlan *KubernetesSupportPlan `json:"supportPlan,omitempty"` + UpgradeSettings *ClusterUpgradeSettings `json:"upgradeSettings,omitempty"` + WindowsProfile *ManagedClusterWindowsProfile `json:"windowsProfile,omitempty"` + WorkloadAutoScalerProfile *ManagedClusterWorkloadAutoScalerProfile `json:"workloadAutoScalerProfile,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpropertiesautoscalerprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpropertiesautoscalerprofile.go new file mode 100644 index 00000000000..3f718456e6e --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterpropertiesautoscalerprofile.go @@ -0,0 +1,27 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterPropertiesAutoScalerProfile struct { + BalanceSimilarNodeGroups *string `json:"balance-similar-node-groups,omitempty"` + DaemonsetEvictionForEmptyNodes *bool `json:"daemonset-eviction-for-empty-nodes,omitempty"` + DaemonsetEvictionForOccupiedNodes *bool `json:"daemonset-eviction-for-occupied-nodes,omitempty"` + Expander *Expander `json:"expander,omitempty"` + IgnoreDaemonsetsUtilization *bool `json:"ignore-daemonsets-utilization,omitempty"` + MaxEmptyBulkDelete *string `json:"max-empty-bulk-delete,omitempty"` + MaxGracefulTerminationSec *string `json:"max-graceful-termination-sec,omitempty"` + MaxNodeProvisionTime *string `json:"max-node-provision-time,omitempty"` + MaxTotalUnreadyPercentage *string `json:"max-total-unready-percentage,omitempty"` + NewPodScaleUpDelay *string `json:"new-pod-scale-up-delay,omitempty"` + OkTotalUnreadyCount *string `json:"ok-total-unready-count,omitempty"` + ScaleDownDelayAfterAdd *string `json:"scale-down-delay-after-add,omitempty"` + ScaleDownDelayAfterDelete *string `json:"scale-down-delay-after-delete,omitempty"` + ScaleDownDelayAfterFailure *string `json:"scale-down-delay-after-failure,omitempty"` + ScaleDownUnneededTime *string `json:"scale-down-unneeded-time,omitempty"` + ScaleDownUnreadyTime *string `json:"scale-down-unready-time,omitempty"` + ScaleDownUtilizationThreshold *string `json:"scale-down-utilization-threshold,omitempty"` + ScanInterval *string `json:"scan-interval,omitempty"` + SkipNodesWithLocalStorage *string `json:"skip-nodes-with-local-storage,omitempty"` + SkipNodesWithSystemPods *string `json:"skip-nodes-with-system-pods,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofile.go new file mode 100644 index 00000000000..1c5892f9364 --- /dev/null +++ 
b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofile.go @@ -0,0 +1,12 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterSecurityProfile struct { + AzureKeyVaultKms *AzureKeyVaultKms `json:"azureKeyVaultKms,omitempty"` + CustomCATrustCertificates *[]string `json:"customCATrustCertificates,omitempty"` + Defender *ManagedClusterSecurityProfileDefender `json:"defender,omitempty"` + ImageCleaner *ManagedClusterSecurityProfileImageCleaner `json:"imageCleaner,omitempty"` + WorkloadIdentity *ManagedClusterSecurityProfileWorkloadIdentity `json:"workloadIdentity,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofiledefender.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofiledefender.go new file mode 100644 index 00000000000..c63debde097 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofiledefender.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterSecurityProfileDefender struct { + LogAnalyticsWorkspaceResourceId *string `json:"logAnalyticsWorkspaceResourceId,omitempty"` + SecurityMonitoring *ManagedClusterSecurityProfileDefenderSecurityMonitoring `json:"securityMonitoring,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofiledefendersecuritymonitoring.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofiledefendersecuritymonitoring.go new file mode 100644 index 00000000000..de84c795fec --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofiledefendersecuritymonitoring.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterSecurityProfileDefenderSecurityMonitoring struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofileimagecleaner.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofileimagecleaner.go new file mode 100644 index 00000000000..eb3e2ea7007 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofileimagecleaner.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterSecurityProfileImageCleaner struct { + Enabled *bool `json:"enabled,omitempty"` + IntervalHours *int64 `json:"intervalHours,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofileworkloadidentity.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofileworkloadidentity.go new file mode 100644 index 00000000000..84f0312767b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersecurityprofileworkloadidentity.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterSecurityProfileWorkloadIdentity struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterserviceprincipalprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterserviceprincipalprofile.go new file mode 100644 index 00000000000..2a03beecdcc --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterserviceprincipalprofile.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterServicePrincipalProfile struct { + ClientId string `json:"clientId"` + Secret *string `json:"secret,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersku.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersku.go new file mode 100644 index 00000000000..a4dcf9c50d3 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclustersku.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterSKU struct { + Name *ManagedClusterSKUName `json:"name,omitempty"` + Tier *ManagedClusterSKUTier `json:"tier,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstaticegressgatewayprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstaticegressgatewayprofile.go new file mode 100644 index 00000000000..943c31aec18 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstaticegressgatewayprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterStaticEgressGatewayProfile struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstatus.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstatus.go new file mode 100644 index 00000000000..99ee9a9e052 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstatus.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterStatus struct { + ProvisioningError *CloudErrorBody `json:"provisioningError,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofile.go new file mode 100644 index 00000000000..11073ae103d --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofile.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterStorageProfile struct { + BlobCSIDriver *ManagedClusterStorageProfileBlobCSIDriver `json:"blobCSIDriver,omitempty"` + DiskCSIDriver *ManagedClusterStorageProfileDiskCSIDriver `json:"diskCSIDriver,omitempty"` + FileCSIDriver *ManagedClusterStorageProfileFileCSIDriver `json:"fileCSIDriver,omitempty"` + SnapshotController *ManagedClusterStorageProfileSnapshotController `json:"snapshotController,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofileblobcsidriver.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofileblobcsidriver.go new file mode 100644 index 00000000000..dc0e951be6b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofileblobcsidriver.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterStorageProfileBlobCSIDriver struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofilediskcsidriver.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofilediskcsidriver.go new file mode 100644 index 00000000000..11eb0e5deb1 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofilediskcsidriver.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterStorageProfileDiskCSIDriver struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofilefilecsidriver.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofilefilecsidriver.go new file mode 100644 index 00000000000..986ece9d933 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofilefilecsidriver.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterStorageProfileFileCSIDriver struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofilesnapshotcontroller.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofilesnapshotcontroller.go new file mode 100644 index 00000000000..d46ee10b683 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterstorageprofilesnapshotcontroller.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterStorageProfileSnapshotController struct { + Enabled *bool `json:"enabled,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterupgradeprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterupgradeprofile.go new file mode 100644 index 00000000000..86274cdb14a --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterupgradeprofile.go @@ -0,0 +1,11 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterUpgradeProfile struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties ManagedClusterUpgradeProfileProperties `json:"properties"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterupgradeprofileproperties.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterupgradeprofileproperties.go new file mode 100644 index 00000000000..4db4b126760 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterupgradeprofileproperties.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterUpgradeProfileProperties struct { + AgentPoolProfiles []ManagedClusterPoolUpgradeProfile `json:"agentPoolProfiles"` + ControlPlaneProfile ManagedClusterPoolUpgradeProfile `json:"controlPlaneProfile"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterwindowsprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterwindowsprofile.go new file mode 100644 index 00000000000..8e3cffdace8 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterwindowsprofile.go @@ -0,0 +1,12 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterWindowsProfile struct { + AdminPassword *string `json:"adminPassword,omitempty"` + AdminUsername string `json:"adminUsername"` + EnableCSIProxy *bool `json:"enableCSIProxy,omitempty"` + GmsaProfile *WindowsGmsaProfile `json:"gmsaProfile,omitempty"` + LicenseType *LicenseType `json:"licenseType,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterworkloadautoscalerprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterworkloadautoscalerprofile.go new file mode 100644 index 00000000000..891a44a0084 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterworkloadautoscalerprofile.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterWorkloadAutoScalerProfile struct { + Keda *ManagedClusterWorkloadAutoScalerProfileKeda `json:"keda,omitempty"` + VerticalPodAutoscaler *ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler `json:"verticalPodAutoscaler,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterworkloadautoscalerprofilekeda.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterworkloadautoscalerprofilekeda.go new file mode 100644 index 00000000000..7115a06bfb3 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterworkloadautoscalerprofilekeda.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManagedClusterWorkloadAutoScalerProfileKeda struct { + Enabled bool `json:"enabled"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterworkloadautoscalerprofileverticalpodautoscaler.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterworkloadautoscalerprofileverticalpodautoscaler.go new file mode 100644 index 00000000000..baf9e23d43c --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_managedclusterworkloadautoscalerprofileverticalpodautoscaler.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ManagedClusterWorkloadAutoScalerProfileVerticalPodAutoscaler struct { + Enabled bool `json:"enabled"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_manualscaleprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_manualscaleprofile.go new file mode 100644 index 00000000000..feb99dc4246 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_manualscaleprofile.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ManualScaleProfile struct { + Count *int64 `json:"count,omitempty"` + Size *string `json:"size,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_meshrevision.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_meshrevision.go new file mode 100644 index 00000000000..e867a34a526 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_meshrevision.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MeshRevision struct { + CompatibleWith *[]CompatibleVersions `json:"compatibleWith,omitempty"` + Revision *string `json:"revision,omitempty"` + Upgrades *[]string `json:"upgrades,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_meshrevisionprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_meshrevisionprofile.go new file mode 100644 index 00000000000..40b56bf28d8 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_meshrevisionprofile.go @@ -0,0 +1,16 @@ +package managedclusters + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MeshRevisionProfile struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *MeshRevisionProfileProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_meshrevisionprofileproperties.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_meshrevisionprofileproperties.go new file mode 100644 index 00000000000..4d72f23dadb --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_meshrevisionprofileproperties.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MeshRevisionProfileProperties struct { + MeshRevisions *[]MeshRevision `json:"meshRevisions,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_meshupgradeprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_meshupgradeprofile.go new file mode 100644 index 00000000000..804e24cc19b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_meshupgradeprofile.go @@ -0,0 +1,16 @@ +package managedclusters + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MeshUpgradeProfile struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *MeshRevision `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_outboundenvironmentendpoint.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_outboundenvironmentendpoint.go new file mode 100644 index 00000000000..a4f0d234bc8 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_outboundenvironmentendpoint.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type OutboundEnvironmentEndpoint struct { + Category *string `json:"category,omitempty"` + Endpoints *[]EndpointDependency `json:"endpoints,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_portrange.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_portrange.go new file mode 100644 index 00000000000..0a44c59e9c8 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_portrange.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PortRange struct { + PortEnd *int64 `json:"portEnd,omitempty"` + PortStart *int64 `json:"portStart,omitempty"` + Protocol *Protocol `json:"protocol,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_powerstate.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_powerstate.go new file mode 100644 index 00000000000..d5abeb15b66 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_powerstate.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PowerState struct { + Code *Code `json:"code,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_privatelinkresource.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_privatelinkresource.go new file mode 100644 index 00000000000..14d827574be --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_privatelinkresource.go @@ -0,0 +1,13 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type PrivateLinkResource struct { + GroupId *string `json:"groupId,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + PrivateLinkServiceID *string `json:"privateLinkServiceID,omitempty"` + RequiredMembers *[]string `json:"requiredMembers,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_resourcereference.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_resourcereference.go new file mode 100644 index 00000000000..dbb05bb4711 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_resourcereference.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResourceReference struct { + Id *string `json:"id,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_runcommandrequest.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_runcommandrequest.go new file mode 100644 index 00000000000..caa9ae3d0cd --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_runcommandrequest.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type RunCommandRequest struct { + ClusterToken *string `json:"clusterToken,omitempty"` + Command string `json:"command"` + Context *string `json:"context,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_runcommandresult.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_runcommandresult.go new file mode 100644 index 00000000000..59cfc374de3 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_runcommandresult.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RunCommandResult struct { + Id *string `json:"id,omitempty"` + Properties *CommandResultProperties `json:"properties,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_scaleprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_scaleprofile.go new file mode 100644 index 00000000000..206de580ca8 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_scaleprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ScaleProfile struct { + Manual *[]ManualScaleProfile `json:"manual,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_servicemeshprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_servicemeshprofile.go new file mode 100644 index 00000000000..781365d16f3 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_servicemeshprofile.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ServiceMeshProfile struct { + Istio *IstioServiceMesh `json:"istio,omitempty"` + Mode ServiceMeshMode `json:"mode"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_sysctlconfig.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_sysctlconfig.go new file mode 100644 index 00000000000..00f3cc50224 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_sysctlconfig.go @@ -0,0 +1,35 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SysctlConfig struct { + FsAioMaxNr *int64 `json:"fsAioMaxNr,omitempty"` + FsFileMax *int64 `json:"fsFileMax,omitempty"` + FsInotifyMaxUserWatches *int64 `json:"fsInotifyMaxUserWatches,omitempty"` + FsNrOpen *int64 `json:"fsNrOpen,omitempty"` + KernelThreadsMax *int64 `json:"kernelThreadsMax,omitempty"` + NetCoreNetdevMaxBacklog *int64 `json:"netCoreNetdevMaxBacklog,omitempty"` + NetCoreOptmemMax *int64 `json:"netCoreOptmemMax,omitempty"` + NetCoreRmemDefault *int64 `json:"netCoreRmemDefault,omitempty"` + NetCoreRmemMax *int64 `json:"netCoreRmemMax,omitempty"` + NetCoreSomaxconn *int64 `json:"netCoreSomaxconn,omitempty"` + NetCoreWmemDefault *int64 `json:"netCoreWmemDefault,omitempty"` + NetCoreWmemMax *int64 `json:"netCoreWmemMax,omitempty"` + NetIPv4IPLocalPortRange *string `json:"netIpv4IpLocalPortRange,omitempty"` + NetIPv4NeighDefaultGcThresh1 *int64 `json:"netIpv4NeighDefaultGcThresh1,omitempty"` + NetIPv4NeighDefaultGcThresh2 *int64 `json:"netIpv4NeighDefaultGcThresh2,omitempty"` + NetIPv4NeighDefaultGcThresh3 *int64 `json:"netIpv4NeighDefaultGcThresh3,omitempty"` + NetIPv4TcpFinTimeout *int64 `json:"netIpv4TcpFinTimeout,omitempty"` + NetIPv4TcpKeepaliveProbes *int64 `json:"netIpv4TcpKeepaliveProbes,omitempty"` + NetIPv4TcpKeepaliveTime 
*int64 `json:"netIpv4TcpKeepaliveTime,omitempty"` + NetIPv4TcpMaxSynBacklog *int64 `json:"netIpv4TcpMaxSynBacklog,omitempty"` + NetIPv4TcpMaxTwBuckets *int64 `json:"netIpv4TcpMaxTwBuckets,omitempty"` + NetIPv4TcpTwReuse *bool `json:"netIpv4TcpTwReuse,omitempty"` + NetIPv4TcpkeepaliveIntvl *int64 `json:"netIpv4TcpkeepaliveIntvl,omitempty"` + NetNetfilterNfConntrackBuckets *int64 `json:"netNetfilterNfConntrackBuckets,omitempty"` + NetNetfilterNfConntrackMax *int64 `json:"netNetfilterNfConntrackMax,omitempty"` + VMMaxMapCount *int64 `json:"vmMaxMapCount,omitempty"` + VMSwappiness *int64 `json:"vmSwappiness,omitempty"` + VMVfsCachePressure *int64 `json:"vmVfsCachePressure,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_tagsobject.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_tagsobject.go new file mode 100644 index 00000000000..69f1454e3b1 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_tagsobject.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TagsObject struct { + Tags *map[string]string `json:"tags,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_upgradeoverridesettings.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_upgradeoverridesettings.go new file mode 100644 index 00000000000..6be73348ea0 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_upgradeoverridesettings.go @@ -0,0 +1,27 @@ +package managedclusters + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type UpgradeOverrideSettings struct { + ForceUpgrade *bool `json:"forceUpgrade,omitempty"` + Until *string `json:"until,omitempty"` +} + +func (o *UpgradeOverrideSettings) GetUntilAsTime() (*time.Time, error) { + if o.Until == nil { + return nil, nil + } + return dates.ParseAsFormat(o.Until, "2006-01-02T15:04:05Z07:00") +} + +func (o *UpgradeOverrideSettings) SetUntilAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.Until = &formatted +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_userassignedidentity.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_userassignedidentity.go new file mode 100644 index 00000000000..b1eecd7085a --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_userassignedidentity.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UserAssignedIdentity struct { + ClientId *string `json:"clientId,omitempty"` + ObjectId *string `json:"objectId,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_virtualmachinenodes.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_virtualmachinenodes.go new file mode 100644 index 00000000000..a2e7398fd71 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_virtualmachinenodes.go @@ -0,0 +1,9 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VirtualMachineNodes struct { + Count *int64 `json:"count,omitempty"` + Size *string `json:"size,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_virtualmachinesprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_virtualmachinesprofile.go new file mode 100644 index 00000000000..b56af9fff92 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_virtualmachinesprofile.go @@ -0,0 +1,8 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VirtualMachinesProfile struct { + Scale *ScaleProfile `json:"scale,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/model_windowsgmsaprofile.go b/resource-manager/containerservice/2025-05-01/managedclusters/model_windowsgmsaprofile.go new file mode 100644 index 00000000000..742f96764d4 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/model_windowsgmsaprofile.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type WindowsGmsaProfile struct { + DnsServer *string `json:"dnsServer,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + RootDomainName *string `json:"rootDomainName,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/predicates.go b/resource-manager/containerservice/2025-05-01/managedclusters/predicates.go new file mode 100644 index 00000000000..3af85d2e142 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/predicates.go @@ -0,0 +1,96 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ManagedClusterOperationPredicate struct { + ETag *string + Id *string + Location *string + Name *string + Type *string +} + +func (p ManagedClusterOperationPredicate) Matches(input ManagedCluster) bool { + + if p.ETag != nil && (input.ETag == nil || *p.ETag != *input.ETag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Location != nil && *p.Location != input.Location { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type MeshRevisionProfileOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p MeshRevisionProfileOperationPredicate) Matches(input MeshRevisionProfile) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type MeshUpgradeProfileOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p MeshUpgradeProfileOperationPredicate) Matches(input MeshUpgradeProfile) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type OutboundEnvironmentEndpointOperationPredicate struct { + Category *string +} + +func (p OutboundEnvironmentEndpointOperationPredicate) Matches(input OutboundEnvironmentEndpoint) bool { + + if p.Category != nil && (input.Category == nil || *p.Category != *input.Category) { + return false + } + + return true +} 
diff --git a/resource-manager/containerservice/2025-05-01/managedclusters/version.go b/resource-manager/containerservice/2025-05-01/managedclusters/version.go new file mode 100644 index 00000000000..10b59d75e34 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/managedclusters/version.go @@ -0,0 +1,10 @@ +package managedclusters + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-05-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/managedclusters/2025-05-01" +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/README.md b/resource-manager/containerservice/2025-05-01/privateendpointconnections/README.md new file mode 100644 index 00000000000..2eaa7b42eb4 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/README.md @@ -0,0 +1,86 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/privateendpointconnections` Documentation + +The `privateendpointconnections` SDK allows for interaction with Azure Resource Manager `containerservice` (API Version `2025-05-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/privateendpointconnections" +``` + + +### Client Initialization + +```go +client := privateendpointconnections.NewPrivateEndpointConnectionsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `PrivateEndpointConnectionsClient.Delete` + +```go +ctx := context.TODO() +id := privateendpointconnections.NewPrivateEndpointConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "privateEndpointConnectionName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `PrivateEndpointConnectionsClient.Get` + +```go +ctx := context.TODO() +id := privateendpointconnections.NewPrivateEndpointConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "privateEndpointConnectionName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `PrivateEndpointConnectionsClient.List` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +read, err := client.List(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `PrivateEndpointConnectionsClient.Update` + +```go +ctx := context.TODO() +id := privateendpointconnections.NewPrivateEndpointConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "privateEndpointConnectionName") + +payload := 
privateendpointconnections.PrivateEndpointConnection{ + // ... +} + + +read, err := client.Update(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/client.go b/resource-manager/containerservice/2025-05-01/privateendpointconnections/client.go new file mode 100644 index 00000000000..50eaf09dcce --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/client.go @@ -0,0 +1,26 @@ +package privateendpointconnections + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PrivateEndpointConnectionsClient struct { + Client *resourcemanager.Client +} + +func NewPrivateEndpointConnectionsClientWithBaseURI(sdkApi sdkEnv.Api) (*PrivateEndpointConnectionsClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "privateendpointconnections", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating PrivateEndpointConnectionsClient: %+v", err) + } + + return &PrivateEndpointConnectionsClient{ + Client: client, + }, nil +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/constants.go b/resource-manager/containerservice/2025-05-01/privateendpointconnections/constants.go new file mode 100644 index 00000000000..20cf74bcad4 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/constants.go @@ -0,0 +1,107 @@ +package privateendpointconnections + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ConnectionStatus string + +const ( + ConnectionStatusApproved ConnectionStatus = "Approved" + ConnectionStatusDisconnected ConnectionStatus = "Disconnected" + ConnectionStatusPending ConnectionStatus = "Pending" + ConnectionStatusRejected ConnectionStatus = "Rejected" +) + +func PossibleValuesForConnectionStatus() []string { + return []string{ + string(ConnectionStatusApproved), + string(ConnectionStatusDisconnected), + string(ConnectionStatusPending), + string(ConnectionStatusRejected), + } +} + +func (s *ConnectionStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseConnectionStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseConnectionStatus(input string) (*ConnectionStatus, error) { + vals := map[string]ConnectionStatus{ + "approved": ConnectionStatusApproved, + "disconnected": ConnectionStatusDisconnected, + "pending": ConnectionStatusPending, + "rejected": ConnectionStatusRejected, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ConnectionStatus(input) + return &out, nil +} + +type PrivateEndpointConnectionProvisioningState string + +const ( + PrivateEndpointConnectionProvisioningStateCanceled PrivateEndpointConnectionProvisioningState = "Canceled" + PrivateEndpointConnectionProvisioningStateCreating PrivateEndpointConnectionProvisioningState = "Creating" + PrivateEndpointConnectionProvisioningStateDeleting PrivateEndpointConnectionProvisioningState = "Deleting" + PrivateEndpointConnectionProvisioningStateFailed PrivateEndpointConnectionProvisioningState = "Failed" + PrivateEndpointConnectionProvisioningStateSucceeded PrivateEndpointConnectionProvisioningState = "Succeeded" 
+) + +func PossibleValuesForPrivateEndpointConnectionProvisioningState() []string { + return []string{ + string(PrivateEndpointConnectionProvisioningStateCanceled), + string(PrivateEndpointConnectionProvisioningStateCreating), + string(PrivateEndpointConnectionProvisioningStateDeleting), + string(PrivateEndpointConnectionProvisioningStateFailed), + string(PrivateEndpointConnectionProvisioningStateSucceeded), + } +} + +func (s *PrivateEndpointConnectionProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parsePrivateEndpointConnectionProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parsePrivateEndpointConnectionProvisioningState(input string) (*PrivateEndpointConnectionProvisioningState, error) { + vals := map[string]PrivateEndpointConnectionProvisioningState{ + "canceled": PrivateEndpointConnectionProvisioningStateCanceled, + "creating": PrivateEndpointConnectionProvisioningStateCreating, + "deleting": PrivateEndpointConnectionProvisioningStateDeleting, + "failed": PrivateEndpointConnectionProvisioningStateFailed, + "succeeded": PrivateEndpointConnectionProvisioningStateSucceeded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := PrivateEndpointConnectionProvisioningState(input) + return &out, nil +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/id_privateendpointconnection.go b/resource-manager/containerservice/2025-05-01/privateendpointconnections/id_privateendpointconnection.go new file mode 100644 index 00000000000..383123bb5dc --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/id_privateendpointconnection.go @@ -0,0 +1,139 @@ +package 
privateendpointconnections + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&PrivateEndpointConnectionId{}) +} + +var _ resourceids.ResourceId = &PrivateEndpointConnectionId{} + +// PrivateEndpointConnectionId is a struct representing the Resource ID for a Private Endpoint Connection +type PrivateEndpointConnectionId struct { + SubscriptionId string + ResourceGroupName string + ManagedClusterName string + PrivateEndpointConnectionName string +} + +// NewPrivateEndpointConnectionID returns a new PrivateEndpointConnectionId struct +func NewPrivateEndpointConnectionID(subscriptionId string, resourceGroupName string, managedClusterName string, privateEndpointConnectionName string) PrivateEndpointConnectionId { + return PrivateEndpointConnectionId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ManagedClusterName: managedClusterName, + PrivateEndpointConnectionName: privateEndpointConnectionName, + } +} + +// ParsePrivateEndpointConnectionID parses 'input' into a PrivateEndpointConnectionId +func ParsePrivateEndpointConnectionID(input string) (*PrivateEndpointConnectionId, error) { + parser := resourceids.NewParserFromResourceIdType(&PrivateEndpointConnectionId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := PrivateEndpointConnectionId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParsePrivateEndpointConnectionIDInsensitively parses 'input' case-insensitively into a PrivateEndpointConnectionId +// note: this method should only be used for API response data and not user 
input +func ParsePrivateEndpointConnectionIDInsensitively(input string) (*PrivateEndpointConnectionId, error) { + parser := resourceids.NewParserFromResourceIdType(&PrivateEndpointConnectionId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := PrivateEndpointConnectionId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *PrivateEndpointConnectionId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ManagedClusterName, ok = input.Parsed["managedClusterName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "managedClusterName", input) + } + + if id.PrivateEndpointConnectionName, ok = input.Parsed["privateEndpointConnectionName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "privateEndpointConnectionName", input) + } + + return nil +} + +// ValidatePrivateEndpointConnectionID checks that 'input' can be parsed as a Private Endpoint Connection ID +func ValidatePrivateEndpointConnectionID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParsePrivateEndpointConnectionID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s/privateEndpointConnections/%s" + return 
fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ManagedClusterName, id.PrivateEndpointConnectionName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("managedClusterName", "managedClusterName"), + resourceids.StaticSegment("staticPrivateEndpointConnections", "privateEndpointConnections", "privateEndpointConnections"), + resourceids.UserSpecifiedSegment("privateEndpointConnectionName", "privateEndpointConnectionName"), + } +} + +// String returns a human-readable description of this Private Endpoint Connection ID +func (id PrivateEndpointConnectionId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Managed Cluster Name: %q", id.ManagedClusterName), + fmt.Sprintf("Private Endpoint Connection Name: %q", id.PrivateEndpointConnectionName), + } + return fmt.Sprintf("Private Endpoint Connection (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/id_privateendpointconnection_test.go 
b/resource-manager/containerservice/2025-05-01/privateendpointconnections/id_privateendpointconnection_test.go new file mode 100644 index 00000000000..f180a2cbb26 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/id_privateendpointconnection_test.go @@ -0,0 +1,327 @@ +package privateendpointconnections + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &PrivateEndpointConnectionId{} + +func TestNewPrivateEndpointConnectionID(t *testing.T) { + id := NewPrivateEndpointConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "privateEndpointConnectionName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.ManagedClusterName != "managedClusterName" { + t.Fatalf("Expected %q but got %q for Segment 'ManagedClusterName'", id.ManagedClusterName, "managedClusterName") + } + + if id.PrivateEndpointConnectionName != "privateEndpointConnectionName" { + t.Fatalf("Expected %q but got %q for Segment 'PrivateEndpointConnectionName'", id.PrivateEndpointConnectionName, "privateEndpointConnectionName") + } +} + +func TestFormatPrivateEndpointConnectionID(t *testing.T) { + actual := NewPrivateEndpointConnectionID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "privateEndpointConnectionName").ID() + expected := 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/privateEndpointConnections/privateEndpointConnectionName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParsePrivateEndpointConnectionID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *PrivateEndpointConnectionId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/privateEndpointConnections", + Error: true, + }, + 
{ + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/privateEndpointConnections/privateEndpointConnectionName", + Expected: &PrivateEndpointConnectionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + PrivateEndpointConnectionName: "privateEndpointConnectionName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/privateEndpointConnections/privateEndpointConnectionName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParsePrivateEndpointConnectionID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.PrivateEndpointConnectionName != v.Expected.PrivateEndpointConnectionName { + t.Fatalf("Expected %q but got %q for PrivateEndpointConnectionName", v.Expected.PrivateEndpointConnectionName, actual.PrivateEndpointConnectionName) + } + + } +} + +func TestParsePrivateEndpointConnectionIDInsensitively(t 
*testing.T) { + testData := []struct { + Input string + Error bool + Expected *PrivateEndpointConnectionId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/privateEndpointConnections", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/pRiVaTeEnDpOiNtCoNnEcTiOnS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/privateEndpointConnections/privateEndpointConnectionName", + Expected: &PrivateEndpointConnectionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + PrivateEndpointConnectionName: "privateEndpointConnectionName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/privateEndpointConnections/privateEndpointConnectionName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/pRiVaTeEnDpOiNtCoNnEcTiOnS/pRiVaTeEnDpOiNtCoNnEcTiOnNaMe", + Expected: &PrivateEndpointConnectionId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + ManagedClusterName: "mAnAgEdClUsTeRnAmE", + PrivateEndpointConnectionName: "pRiVaTeEnDpOiNtCoNnEcTiOnNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/pRiVaTeEnDpOiNtCoNnEcTiOnS/pRiVaTeEnDpOiNtCoNnEcTiOnNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParsePrivateEndpointConnectionIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, 
actual.ManagedClusterName) + } + + if actual.PrivateEndpointConnectionName != v.Expected.PrivateEndpointConnectionName { + t.Fatalf("Expected %q but got %q for PrivateEndpointConnectionName", v.Expected.PrivateEndpointConnectionName, actual.PrivateEndpointConnectionName) + } + + } +} + +func TestSegmentsForPrivateEndpointConnectionId(t *testing.T) { + segments := PrivateEndpointConnectionId{}.Segments() + if len(segments) == 0 { + t.Fatalf("PrivateEndpointConnectionId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/method_delete.go b/resource-manager/containerservice/2025-05-01/privateendpointconnections/method_delete.go new file mode 100644 index 00000000000..2b9a343bdf0 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/method_delete.go @@ -0,0 +1,70 @@ +package privateendpointconnections + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... 
+func (c PrivateEndpointConnectionsClient) Delete(ctx context.Context, id PrivateEndpointConnectionId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c PrivateEndpointConnectionsClient) DeleteThenPoll(ctx context.Context, id PrivateEndpointConnectionId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/method_get.go b/resource-manager/containerservice/2025-05-01/privateendpointconnections/method_get.go new file mode 100644 index 00000000000..351242746d4 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/method_get.go @@ -0,0 +1,53 @@ +package privateendpointconnections + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *PrivateEndpointConnection +} + +// Get ... +func (c PrivateEndpointConnectionsClient) Get(ctx context.Context, id PrivateEndpointConnectionId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model PrivateEndpointConnection + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/method_list.go b/resource-manager/containerservice/2025-05-01/privateendpointconnections/method_list.go new file mode 100644 index 00000000000..1ed7cfe4cf6 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/method_list.go @@ -0,0 +1,55 @@ +package privateendpointconnections + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *PrivateEndpointConnectionListResult +} + +// List ... 
+func (c PrivateEndpointConnectionsClient) List(ctx context.Context, id commonids.KubernetesClusterId) (result ListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/privateEndpointConnections", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model PrivateEndpointConnectionListResult + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/method_update.go b/resource-manager/containerservice/2025-05-01/privateendpointconnections/method_update.go new file mode 100644 index 00000000000..12c2adf4c15 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/method_update.go @@ -0,0 +1,58 @@ +package privateendpointconnections + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *PrivateEndpointConnection +} + +// Update ... 
+func (c PrivateEndpointConnectionsClient) Update(ctx context.Context, id PrivateEndpointConnectionId, input PrivateEndpointConnection) (result UpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model PrivateEndpointConnection + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privateendpoint.go b/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privateendpoint.go new file mode 100644 index 00000000000..1bc8cf2a97c --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privateendpoint.go @@ -0,0 +1,8 @@ +package privateendpointconnections + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type PrivateEndpoint struct { + Id *string `json:"id,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privateendpointconnection.go b/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privateendpointconnection.go new file mode 100644 index 00000000000..fde7f4db0fc --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privateendpointconnection.go @@ -0,0 +1,11 @@ +package privateendpointconnections + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PrivateEndpointConnection struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *PrivateEndpointConnectionProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privateendpointconnectionlistresult.go b/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privateendpointconnectionlistresult.go new file mode 100644 index 00000000000..92a9ecb105a --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privateendpointconnectionlistresult.go @@ -0,0 +1,8 @@ +package privateendpointconnections + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type PrivateEndpointConnectionListResult struct { + Value *[]PrivateEndpointConnection `json:"value,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privateendpointconnectionproperties.go b/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privateendpointconnectionproperties.go new file mode 100644 index 00000000000..549d255f069 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privateendpointconnectionproperties.go @@ -0,0 +1,10 @@ +package privateendpointconnections + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PrivateEndpointConnectionProperties struct { + PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty"` + PrivateLinkServiceConnectionState PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState"` + ProvisioningState *PrivateEndpointConnectionProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privatelinkserviceconnectionstate.go b/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privatelinkserviceconnectionstate.go new file mode 100644 index 00000000000..677cab99f90 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/model_privatelinkserviceconnectionstate.go @@ -0,0 +1,9 @@ +package privateendpointconnections + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type PrivateLinkServiceConnectionState struct { + Description *string `json:"description,omitempty"` + Status *ConnectionStatus `json:"status,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/privateendpointconnections/version.go b/resource-manager/containerservice/2025-05-01/privateendpointconnections/version.go new file mode 100644 index 00000000000..df544cba857 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privateendpointconnections/version.go @@ -0,0 +1,10 @@ +package privateendpointconnections + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-05-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/privateendpointconnections/2025-05-01" +} diff --git a/resource-manager/containerservice/2025-05-01/privatelinkresources/README.md b/resource-manager/containerservice/2025-05-01/privatelinkresources/README.md new file mode 100644 index 00000000000..795c0755038 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privatelinkresources/README.md @@ -0,0 +1,37 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/privatelinkresources` Documentation + +The `privatelinkresources` SDK allows for interaction with Azure Resource Manager `containerservice` (API Version `2025-05-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/privatelinkresources" +``` + + +### Client Initialization + +```go +client := privatelinkresources.NewPrivateLinkResourcesClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `PrivateLinkResourcesClient.List` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +read, err := client.List(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/containerservice/2025-05-01/privatelinkresources/client.go b/resource-manager/containerservice/2025-05-01/privatelinkresources/client.go new file mode 100644 index 00000000000..28c00b21f03 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privatelinkresources/client.go @@ -0,0 +1,26 @@ +package privatelinkresources + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type PrivateLinkResourcesClient struct { + Client *resourcemanager.Client +} + +func NewPrivateLinkResourcesClientWithBaseURI(sdkApi sdkEnv.Api) (*PrivateLinkResourcesClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "privatelinkresources", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating PrivateLinkResourcesClient: %+v", err) + } + + return &PrivateLinkResourcesClient{ + Client: client, + }, nil +} diff --git a/resource-manager/containerservice/2025-05-01/privatelinkresources/method_list.go b/resource-manager/containerservice/2025-05-01/privatelinkresources/method_list.go new file mode 100644 index 00000000000..905a02fa803 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privatelinkresources/method_list.go @@ -0,0 +1,55 @@ +package privatelinkresources + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *PrivateLinkResourcesListResult +} + +// List ... 
+func (c PrivateLinkResourcesClient) List(ctx context.Context, id commonids.KubernetesClusterId) (result ListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/privateLinkResources", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model PrivateLinkResourcesListResult + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/privatelinkresources/model_privatelinkresource.go b/resource-manager/containerservice/2025-05-01/privatelinkresources/model_privatelinkresource.go new file mode 100644 index 00000000000..3490b3d4e87 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privatelinkresources/model_privatelinkresource.go @@ -0,0 +1,13 @@ +package privatelinkresources + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type PrivateLinkResource struct { + GroupId *string `json:"groupId,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + PrivateLinkServiceID *string `json:"privateLinkServiceID,omitempty"` + RequiredMembers *[]string `json:"requiredMembers,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/privatelinkresources/model_privatelinkresourceslistresult.go b/resource-manager/containerservice/2025-05-01/privatelinkresources/model_privatelinkresourceslistresult.go new file mode 100644 index 00000000000..52cfe88b6e2 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privatelinkresources/model_privatelinkresourceslistresult.go @@ -0,0 +1,8 @@ +package privatelinkresources + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PrivateLinkResourcesListResult struct { + Value *[]PrivateLinkResource `json:"value,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/privatelinkresources/version.go b/resource-manager/containerservice/2025-05-01/privatelinkresources/version.go new file mode 100644 index 00000000000..a5550d0b0b6 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/privatelinkresources/version.go @@ -0,0 +1,10 @@ +package privatelinkresources + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-05-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/privatelinkresources/2025-05-01" +} diff --git a/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/README.md b/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/README.md new file mode 100644 index 00000000000..e0102a1c6f2 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/README.md @@ -0,0 +1,42 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid` Documentation + +The `resolveprivatelinkserviceid` SDK allows for interaction with Azure Resource Manager `containerservice` (API Version `2025-05-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid" +``` + + +### Client Initialization + +```go +client := resolveprivatelinkserviceid.NewResolvePrivateLinkServiceIdClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `ResolvePrivateLinkServiceIdClient.POST` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +payload := resolveprivatelinkserviceid.PrivateLinkResource{ + // ... 
+} + + +read, err := client.POST(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/client.go b/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/client.go new file mode 100644 index 00000000000..5682fe91725 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/client.go @@ -0,0 +1,26 @@ +package resolveprivatelinkserviceid + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResolvePrivateLinkServiceIdClient struct { + Client *resourcemanager.Client +} + +func NewResolvePrivateLinkServiceIdClientWithBaseURI(sdkApi sdkEnv.Api) (*ResolvePrivateLinkServiceIdClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "resolveprivatelinkserviceid", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating ResolvePrivateLinkServiceIdClient: %+v", err) + } + + return &ResolvePrivateLinkServiceIdClient{ + Client: client, + }, nil +} diff --git a/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/method_post.go b/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/method_post.go new file mode 100644 index 00000000000..de55fb54ccc --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/method_post.go @@ -0,0 +1,59 @@ +package resolveprivatelinkserviceid + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type POSTOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *PrivateLinkResource +} + +// POST ... +func (c ResolvePrivateLinkServiceIdClient) POST(ctx context.Context, id commonids.KubernetesClusterId, input PrivateLinkResource) (result POSTOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/resolvePrivateLinkServiceId", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model PrivateLinkResource + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/model_privatelinkresource.go b/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/model_privatelinkresource.go new file mode 100644 index 00000000000..4ff07dba897 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/model_privatelinkresource.go @@ -0,0 +1,13 @@ +package resolveprivatelinkserviceid + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type PrivateLinkResource struct { + GroupId *string `json:"groupId,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + PrivateLinkServiceID *string `json:"privateLinkServiceID,omitempty"` + RequiredMembers *[]string `json:"requiredMembers,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/version.go b/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/version.go new file mode 100644 index 00000000000..f1cbda7488d --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/resolveprivatelinkserviceid/version.go @@ -0,0 +1,10 @@ +package resolveprivatelinkserviceid + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-05-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/resolveprivatelinkserviceid/2025-05-01" +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/README.md b/resource-manager/containerservice/2025-05-01/snapshots/README.md new file mode 100644 index 00000000000..f44113c0a20 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/README.md @@ -0,0 +1,129 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/snapshots` Documentation + +The `snapshots` SDK allows for interaction with Azure Resource Manager `containerservice` (API Version `2025-05-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/snapshots" +``` + + +### Client Initialization + +```go +client := snapshots.NewSnapshotsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `SnapshotsClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := snapshots.NewSnapshotID("12345678-1234-9876-4563-123456789012", "example-resource-group", "snapshotName") + +payload := snapshots.Snapshot{ + // ... +} + + +read, err := client.CreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `SnapshotsClient.Delete` + +```go +ctx := context.TODO() +id := snapshots.NewSnapshotID("12345678-1234-9876-4563-123456789012", "example-resource-group", "snapshotName") + +read, err := client.Delete(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `SnapshotsClient.Get` + +```go +ctx := context.TODO() +id := snapshots.NewSnapshotID("12345678-1234-9876-4563-123456789012", "example-resource-group", "snapshotName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `SnapshotsClient.List` + +```go +ctx := context.TODO() +id := commonids.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.List(ctx, id)` can be used to do batched pagination +items, err := client.ListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `SnapshotsClient.ListByResourceGroup` + 
+```go +ctx := context.TODO() +id := commonids.NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + +// alternatively `client.ListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `SnapshotsClient.UpdateTags` + +```go +ctx := context.TODO() +id := snapshots.NewSnapshotID("12345678-1234-9876-4563-123456789012", "example-resource-group", "snapshotName") + +payload := snapshots.TagsObject{ + // ... +} + + +read, err := client.UpdateTags(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/containerservice/2025-05-01/snapshots/client.go b/resource-manager/containerservice/2025-05-01/snapshots/client.go new file mode 100644 index 00000000000..678a64470aa --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/client.go @@ -0,0 +1,26 @@ +package snapshots + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SnapshotsClient struct { + Client *resourcemanager.Client +} + +func NewSnapshotsClientWithBaseURI(sdkApi sdkEnv.Api) (*SnapshotsClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "snapshots", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating SnapshotsClient: %+v", err) + } + + return &SnapshotsClient{ + Client: client, + }, nil +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/constants.go b/resource-manager/containerservice/2025-05-01/snapshots/constants.go new file mode 100644 index 00000000000..e797158d78c --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/constants.go @@ -0,0 +1,142 @@ +package snapshots + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type OSSKU string + +const ( + OSSKUAzureLinux OSSKU = "AzureLinux" + OSSKUCBLMariner OSSKU = "CBLMariner" + OSSKUUbuntu OSSKU = "Ubuntu" + OSSKUUbuntuTwoTwoZeroFour OSSKU = "Ubuntu2204" + OSSKUWindowsTwoZeroOneNine OSSKU = "Windows2019" + OSSKUWindowsTwoZeroTwoTwo OSSKU = "Windows2022" +) + +func PossibleValuesForOSSKU() []string { + return []string{ + string(OSSKUAzureLinux), + string(OSSKUCBLMariner), + string(OSSKUUbuntu), + string(OSSKUUbuntuTwoTwoZeroFour), + string(OSSKUWindowsTwoZeroOneNine), + string(OSSKUWindowsTwoZeroTwoTwo), + } +} + +func (s *OSSKU) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseOSSKU(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOSSKU(input string) (*OSSKU, error) { + vals := map[string]OSSKU{ + "azurelinux": OSSKUAzureLinux, + "cblmariner": OSSKUCBLMariner, + "ubuntu": OSSKUUbuntu, + "ubuntu2204": 
OSSKUUbuntuTwoTwoZeroFour, + "windows2019": OSSKUWindowsTwoZeroOneNine, + "windows2022": OSSKUWindowsTwoZeroTwoTwo, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSSKU(input) + return &out, nil +} + +type OSType string + +const ( + OSTypeLinux OSType = "Linux" + OSTypeWindows OSType = "Windows" +) + +func PossibleValuesForOSType() []string { + return []string{ + string(OSTypeLinux), + string(OSTypeWindows), + } +} + +func (s *OSType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseOSType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseOSType(input string) (*OSType, error) { + vals := map[string]OSType{ + "linux": OSTypeLinux, + "windows": OSTypeWindows, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := OSType(input) + return &out, nil +} + +type SnapshotType string + +const ( + SnapshotTypeNodePool SnapshotType = "NodePool" +) + +func PossibleValuesForSnapshotType() []string { + return []string{ + string(SnapshotTypeNodePool), + } +} + +func (s *SnapshotType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSnapshotType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSnapshotType(input string) (*SnapshotType, error) { + vals := map[string]SnapshotType{ + "nodepool": SnapshotTypeNodePool, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := 
SnapshotType(input) + return &out, nil +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/id_snapshot.go b/resource-manager/containerservice/2025-05-01/snapshots/id_snapshot.go new file mode 100644 index 00000000000..8081e44fc0b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/id_snapshot.go @@ -0,0 +1,130 @@ +package snapshots + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&SnapshotId{}) +} + +var _ resourceids.ResourceId = &SnapshotId{} + +// SnapshotId is a struct representing the Resource ID for a Snapshot +type SnapshotId struct { + SubscriptionId string + ResourceGroupName string + SnapshotName string +} + +// NewSnapshotID returns a new SnapshotId struct +func NewSnapshotID(subscriptionId string, resourceGroupName string, snapshotName string) SnapshotId { + return SnapshotId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + SnapshotName: snapshotName, + } +} + +// ParseSnapshotID parses 'input' into a SnapshotId +func ParseSnapshotID(input string) (*SnapshotId, error) { + parser := resourceids.NewParserFromResourceIdType(&SnapshotId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SnapshotId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseSnapshotIDInsensitively parses 'input' case-insensitively into a SnapshotId +// note: this method should only be used for API response data and not user input +func ParseSnapshotIDInsensitively(input string) (*SnapshotId, error) { + parser := 
resourceids.NewParserFromResourceIdType(&SnapshotId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SnapshotId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *SnapshotId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.SnapshotName, ok = input.Parsed["snapshotName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "snapshotName", input) + } + + return nil +} + +// ValidateSnapshotID checks that 'input' can be parsed as a Snapshot ID +func ValidateSnapshotID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseSnapshotID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Snapshot ID +func (id SnapshotId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/snapshots/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.SnapshotName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Snapshot ID +func (id SnapshotId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + 
resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticSnapshots", "snapshots", "snapshots"), + resourceids.UserSpecifiedSegment("snapshotName", "snapshotName"), + } +} + +// String returns a human-readable description of this Snapshot ID +func (id SnapshotId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Snapshot Name: %q", id.SnapshotName), + } + return fmt.Sprintf("Snapshot (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/id_snapshot_test.go b/resource-manager/containerservice/2025-05-01/snapshots/id_snapshot_test.go new file mode 100644 index 00000000000..67f5a402cf0 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/id_snapshot_test.go @@ -0,0 +1,282 @@ +package snapshots + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &SnapshotId{} + +func TestNewSnapshotID(t *testing.T) { + id := NewSnapshotID("12345678-1234-9876-4563-123456789012", "example-resource-group", "snapshotName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.SnapshotName != "snapshotName" { + t.Fatalf("Expected %q but got %q for Segment 'SnapshotName'", id.SnapshotName, "snapshotName") + } +} + +func TestFormatSnapshotID(t *testing.T) { + actual := NewSnapshotID("12345678-1234-9876-4563-123456789012", "example-resource-group", "snapshotName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/snapshots/snapshotName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseSnapshotID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SnapshotId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/snapshots", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/snapshots/snapshotName", + Expected: &SnapshotId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + SnapshotName: "snapshotName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/snapshots/snapshotName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSnapshotID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.SnapshotName != v.Expected.SnapshotName { + t.Fatalf("Expected %q but got %q for SnapshotName", v.Expected.SnapshotName, actual.SnapshotName) + } + + } +} + +func TestParseSnapshotIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SnapshotId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + 
Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/snapshots", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is 
insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/sNaPsHoTs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/snapshots/snapshotName", + Expected: &SnapshotId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + SnapshotName: "snapshotName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/snapshots/snapshotName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/sNaPsHoTs/sNaPsHoTnAmE", + Expected: &SnapshotId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + SnapshotName: "sNaPsHoTnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/sNaPsHoTs/sNaPsHoTnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSnapshotIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + 
t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.SnapshotName != v.Expected.SnapshotName { + t.Fatalf("Expected %q but got %q for SnapshotName", v.Expected.SnapshotName, actual.SnapshotName) + } + + } +} + +func TestSegmentsForSnapshotId(t *testing.T) { + segments := SnapshotId{}.Segments() + if len(segments) == 0 { + t.Fatalf("SnapshotId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/method_createorupdate.go b/resource-manager/containerservice/2025-05-01/snapshots/method_createorupdate.go new file mode 100644 index 00000000000..2085af2f95b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/method_createorupdate.go @@ -0,0 +1,58 @@ +package snapshots + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *Snapshot +} + +// CreateOrUpdate ... 
+func (c SnapshotsClient) CreateOrUpdate(ctx context.Context, id SnapshotId, input Snapshot) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model Snapshot + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/method_delete.go b/resource-manager/containerservice/2025-05-01/snapshots/method_delete.go new file mode 100644 index 00000000000..575cb8a6929 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/method_delete.go @@ -0,0 +1,47 @@ +package snapshots + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... 
+func (c SnapshotsClient) Delete(ctx context.Context, id SnapshotId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/method_get.go b/resource-manager/containerservice/2025-05-01/snapshots/method_get.go new file mode 100644 index 00000000000..68b676f0115 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/method_get.go @@ -0,0 +1,53 @@ +package snapshots + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *Snapshot +} + +// Get ... 
+func (c SnapshotsClient) Get(ctx context.Context, id SnapshotId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model Snapshot + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/method_list.go b/resource-manager/containerservice/2025-05-01/snapshots/method_list.go new file mode 100644 index 00000000000..340f60c09b1 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/method_list.go @@ -0,0 +1,106 @@ +package snapshots + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]Snapshot +} + +type ListCompleteResult struct { + LatestHttpResponse *http.Response + Items []Snapshot +} + +type ListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// List ... 
+func (c SnapshotsClient) List(ctx context.Context, id commonids.SubscriptionId) (result ListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.ContainerService/snapshots", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]Snapshot `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListComplete retrieves all the results into a single object +func (c SnapshotsClient) ListComplete(ctx context.Context, id commonids.SubscriptionId) (ListCompleteResult, error) { + return c.ListCompleteMatchingPredicate(ctx, id, SnapshotOperationPredicate{}) +} + +// ListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c SnapshotsClient) ListCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate SnapshotOperationPredicate) (result ListCompleteResult, err error) { + items := make([]Snapshot, 0) + + resp, err := c.List(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/method_listbyresourcegroup.go b/resource-manager/containerservice/2025-05-01/snapshots/method_listbyresourcegroup.go 
new file mode 100644 index 00000000000..fb276c706e9 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/method_listbyresourcegroup.go @@ -0,0 +1,106 @@ +package snapshots + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]Snapshot +} + +type ListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []Snapshot +} + +type ListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByResourceGroup ... 
+func (c SnapshotsClient) ListByResourceGroup(ctx context.Context, id commonids.ResourceGroupId) (result ListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.ContainerService/snapshots", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]Snapshot `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByResourceGroupComplete retrieves all the results into a single object +func (c SnapshotsClient) ListByResourceGroupComplete(ctx context.Context, id commonids.ResourceGroupId) (ListByResourceGroupCompleteResult, error) { + return c.ListByResourceGroupCompleteMatchingPredicate(ctx, id, SnapshotOperationPredicate{}) +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c SnapshotsClient) ListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id commonids.ResourceGroupId, predicate SnapshotOperationPredicate) (result ListByResourceGroupCompleteResult, err error) { + items := make([]Snapshot, 0) + + resp, err := c.ListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByResourceGroupCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff 
--git a/resource-manager/containerservice/2025-05-01/snapshots/method_updatetags.go b/resource-manager/containerservice/2025-05-01/snapshots/method_updatetags.go new file mode 100644 index 00000000000..86fd5d87f00 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/method_updatetags.go @@ -0,0 +1,57 @@ +package snapshots + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpdateTagsOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *Snapshot +} + +// UpdateTags ... +func (c SnapshotsClient) UpdateTags(ctx context.Context, id SnapshotId, input TagsObject) (result UpdateTagsOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model Snapshot + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/model_creationdata.go b/resource-manager/containerservice/2025-05-01/snapshots/model_creationdata.go new file mode 100644 index 00000000000..132e3459c82 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/model_creationdata.go @@ -0,0 +1,8 @@ +package snapshots + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreationData struct { + SourceResourceId *string `json:"sourceResourceId,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/model_snapshot.go b/resource-manager/containerservice/2025-05-01/snapshots/model_snapshot.go new file mode 100644 index 00000000000..b2feb4ccdae --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/model_snapshot.go @@ -0,0 +1,18 @@ +package snapshots + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type Snapshot struct { + Id *string `json:"id,omitempty"` + Location string `json:"location"` + Name *string `json:"name,omitempty"` + Properties *SnapshotProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/model_snapshotproperties.go b/resource-manager/containerservice/2025-05-01/snapshots/model_snapshotproperties.go new file mode 100644 index 00000000000..962aa32598f --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/model_snapshotproperties.go @@ -0,0 +1,15 @@ +package snapshots + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SnapshotProperties struct { + CreationData *CreationData `json:"creationData,omitempty"` + EnableFIPS *bool `json:"enableFIPS,omitempty"` + KubernetesVersion *string `json:"kubernetesVersion,omitempty"` + NodeImageVersion *string `json:"nodeImageVersion,omitempty"` + OsSku *OSSKU `json:"osSku,omitempty"` + OsType *OSType `json:"osType,omitempty"` + SnapshotType *SnapshotType `json:"snapshotType,omitempty"` + VMSize *string `json:"vmSize,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/model_tagsobject.go b/resource-manager/containerservice/2025-05-01/snapshots/model_tagsobject.go new file mode 100644 index 00000000000..30f646c1c9f --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/model_tagsobject.go @@ -0,0 +1,8 @@ +package snapshots + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TagsObject struct { + Tags *map[string]string `json:"tags,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/predicates.go b/resource-manager/containerservice/2025-05-01/snapshots/predicates.go new file mode 100644 index 00000000000..832503e3b26 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/predicates.go @@ -0,0 +1,32 @@ +package snapshots + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SnapshotOperationPredicate struct { + Id *string + Location *string + Name *string + Type *string +} + +func (p SnapshotOperationPredicate) Matches(input Snapshot) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Location != nil && *p.Location != input.Location { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/containerservice/2025-05-01/snapshots/version.go b/resource-manager/containerservice/2025-05-01/snapshots/version.go new file mode 100644 index 00000000000..09e822173ed --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/snapshots/version.go @@ -0,0 +1,10 @@ +package snapshots + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-05-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/snapshots/2025-05-01" +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/README.md b/resource-manager/containerservice/2025-05-01/trustedaccess/README.md new file mode 100644 index 00000000000..493f6458789 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/README.md @@ -0,0 +1,100 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/trustedaccess` Documentation + +The `trustedaccess` SDK allows for interaction with Azure Resource Manager `containerservice` (API Version `2025-05-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/containerservice/2025-05-01/trustedaccess" +``` + + +### Client Initialization + +```go +client := trustedaccess.NewTrustedAccessClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `TrustedAccessClient.RoleBindingsCreateOrUpdate` + +```go +ctx := context.TODO() +id := trustedaccess.NewTrustedAccessRoleBindingID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "trustedAccessRoleBindingName") + +payload := trustedaccess.TrustedAccessRoleBinding{ + // ... +} + + +if err := client.RoleBindingsCreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `TrustedAccessClient.RoleBindingsDelete` + +```go +ctx := context.TODO() +id := trustedaccess.NewTrustedAccessRoleBindingID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "trustedAccessRoleBindingName") + +if err := client.RoleBindingsDeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `TrustedAccessClient.RoleBindingsGet` + +```go +ctx := context.TODO() +id := trustedaccess.NewTrustedAccessRoleBindingID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "trustedAccessRoleBindingName") + +read, err := client.RoleBindingsGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `TrustedAccessClient.RoleBindingsList` + +```go +ctx := context.TODO() +id := commonids.NewKubernetesClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName") + +// alternatively `client.RoleBindingsList(ctx, id)` can be used to do batched pagination +items, err := 
client.RoleBindingsListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `TrustedAccessClient.RolesList` + +```go +ctx := context.TODO() +id := trustedaccess.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +// alternatively `client.RolesList(ctx, id)` can be used to do batched pagination +items, err := client.RolesListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/client.go b/resource-manager/containerservice/2025-05-01/trustedaccess/client.go new file mode 100644 index 00000000000..e3e1d5a54e4 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/client.go @@ -0,0 +1,26 @@ +package trustedaccess + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type TrustedAccessClient struct { + Client *resourcemanager.Client +} + +func NewTrustedAccessClientWithBaseURI(sdkApi sdkEnv.Api) (*TrustedAccessClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "trustedaccess", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating TrustedAccessClient: %+v", err) + } + + return &TrustedAccessClient{ + Client: client, + }, nil +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/constants.go b/resource-manager/containerservice/2025-05-01/trustedaccess/constants.go new file mode 100644 index 00000000000..8ab993f971c --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/constants.go @@ -0,0 +1,60 @@ +package trustedaccess + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TrustedAccessRoleBindingProvisioningState string + +const ( + TrustedAccessRoleBindingProvisioningStateCanceled TrustedAccessRoleBindingProvisioningState = "Canceled" + TrustedAccessRoleBindingProvisioningStateDeleting TrustedAccessRoleBindingProvisioningState = "Deleting" + TrustedAccessRoleBindingProvisioningStateFailed TrustedAccessRoleBindingProvisioningState = "Failed" + TrustedAccessRoleBindingProvisioningStateSucceeded TrustedAccessRoleBindingProvisioningState = "Succeeded" + TrustedAccessRoleBindingProvisioningStateUpdating TrustedAccessRoleBindingProvisioningState = "Updating" +) + +func PossibleValuesForTrustedAccessRoleBindingProvisioningState() []string { + return []string{ + string(TrustedAccessRoleBindingProvisioningStateCanceled), + string(TrustedAccessRoleBindingProvisioningStateDeleting), + string(TrustedAccessRoleBindingProvisioningStateFailed), + string(TrustedAccessRoleBindingProvisioningStateSucceeded), + string(TrustedAccessRoleBindingProvisioningStateUpdating), + } +} + +func (s 
*TrustedAccessRoleBindingProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTrustedAccessRoleBindingProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTrustedAccessRoleBindingProvisioningState(input string) (*TrustedAccessRoleBindingProvisioningState, error) { + vals := map[string]TrustedAccessRoleBindingProvisioningState{ + "canceled": TrustedAccessRoleBindingProvisioningStateCanceled, + "deleting": TrustedAccessRoleBindingProvisioningStateDeleting, + "failed": TrustedAccessRoleBindingProvisioningStateFailed, + "succeeded": TrustedAccessRoleBindingProvisioningStateSucceeded, + "updating": TrustedAccessRoleBindingProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TrustedAccessRoleBindingProvisioningState(input) + return &out, nil +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/id_location.go b/resource-manager/containerservice/2025-05-01/trustedaccess/id_location.go new file mode 100644 index 00000000000..9372d26414b --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/id_location.go @@ -0,0 +1,121 @@ +package trustedaccess + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&LocationId{}) +} + +var _ resourceids.ResourceId = &LocationId{} + +// LocationId is a struct representing the Resource ID for a Location +type LocationId struct { + SubscriptionId string + LocationName string +} + +// NewLocationID returns a new LocationId struct +func NewLocationID(subscriptionId string, locationName string) LocationId { + return LocationId{ + SubscriptionId: subscriptionId, + LocationName: locationName, + } +} + +// ParseLocationID parses 'input' into a LocationId +func ParseLocationID(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseLocationIDInsensitively parses 'input' case-insensitively into a LocationId +// note: this method should only be used for API response data and not user input +func ParseLocationIDInsensitively(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *LocationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.LocationName, ok = input.Parsed["locationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "locationName", input) + } + + return nil +} + +// ValidateLocationID checks that 'input' can be parsed as a Location ID +func ValidateLocationID(input interface{}, key string) 
(warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseLocationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Location ID +func (id LocationId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.ContainerService/locations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.LocationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Location ID +func (id LocationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticLocations", "locations", "locations"), + resourceids.UserSpecifiedSegment("locationName", "locationName"), + } +} + +// String returns a human-readable description of this Location ID +func (id LocationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location Name: %q", id.LocationName), + } + return fmt.Sprintf("Location (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/id_location_test.go b/resource-manager/containerservice/2025-05-01/trustedaccess/id_location_test.go new file mode 100644 index 00000000000..d75d02aa0b9 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/id_location_test.go @@ -0,0 +1,237 @@ +package trustedaccess + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &LocationId{} + +func TestNewLocationID(t *testing.T) { + id := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.LocationName != "locationName" { + t.Fatalf("Expected %q but got %q for Segment 'LocationName'", id.LocationName, "locationName") + } +} + +func TestFormatLocationID(t *testing.T) { + actual := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseLocationID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName", + Expected: &LocationId{ + 
SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestParseLocationIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService", + Error: true, + }, + { + // 
Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/lOcAtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.ContainerService/locations/locationName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/lOcAtIoNs/lOcAtIoNnAmE", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "lOcAtIoNnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/lOcAtIoNs/lOcAtIoNnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, 
actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestSegmentsForLocationId(t *testing.T) { + segments := LocationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("LocationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/id_trustedaccessrolebinding.go b/resource-manager/containerservice/2025-05-01/trustedaccess/id_trustedaccessrolebinding.go new file mode 100644 index 00000000000..e53842f1ccd --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/id_trustedaccessrolebinding.go @@ -0,0 +1,139 @@ +package trustedaccess + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&TrustedAccessRoleBindingId{}) +} + +var _ resourceids.ResourceId = &TrustedAccessRoleBindingId{} + +// TrustedAccessRoleBindingId is a struct representing the Resource ID for a Trusted Access Role Binding +type TrustedAccessRoleBindingId struct { + SubscriptionId string + ResourceGroupName string + ManagedClusterName string + TrustedAccessRoleBindingName string +} + +// NewTrustedAccessRoleBindingID returns a new TrustedAccessRoleBindingId struct +func NewTrustedAccessRoleBindingID(subscriptionId string, resourceGroupName string, managedClusterName string, trustedAccessRoleBindingName string) TrustedAccessRoleBindingId { + return TrustedAccessRoleBindingId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ManagedClusterName: managedClusterName, + TrustedAccessRoleBindingName: trustedAccessRoleBindingName, + } +} + +// ParseTrustedAccessRoleBindingID parses 'input' into a TrustedAccessRoleBindingId +func ParseTrustedAccessRoleBindingID(input string) (*TrustedAccessRoleBindingId, error) { + parser := resourceids.NewParserFromResourceIdType(&TrustedAccessRoleBindingId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TrustedAccessRoleBindingId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseTrustedAccessRoleBindingIDInsensitively parses 'input' case-insensitively into a TrustedAccessRoleBindingId +// note: this method should only be used for API response data and not user input +func ParseTrustedAccessRoleBindingIDInsensitively(input string) (*TrustedAccessRoleBindingId, error) { + parser := resourceids.NewParserFromResourceIdType(&TrustedAccessRoleBindingId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TrustedAccessRoleBindingId{} + if err = 
id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *TrustedAccessRoleBindingId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ManagedClusterName, ok = input.Parsed["managedClusterName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "managedClusterName", input) + } + + if id.TrustedAccessRoleBindingName, ok = input.Parsed["trustedAccessRoleBindingName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "trustedAccessRoleBindingName", input) + } + + return nil +} + +// ValidateTrustedAccessRoleBindingID checks that 'input' can be parsed as a Trusted Access Role Binding ID +func ValidateTrustedAccessRoleBindingID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseTrustedAccessRoleBindingID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Trusted Access Role Binding ID +func (id TrustedAccessRoleBindingId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters/%s/trustedAccessRoleBindings/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ManagedClusterName, id.TrustedAccessRoleBindingName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Trusted Access Role Binding ID +func (id TrustedAccessRoleBindingId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", 
"subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftContainerService", "Microsoft.ContainerService", "Microsoft.ContainerService"), + resourceids.StaticSegment("staticManagedClusters", "managedClusters", "managedClusters"), + resourceids.UserSpecifiedSegment("managedClusterName", "managedClusterName"), + resourceids.StaticSegment("staticTrustedAccessRoleBindings", "trustedAccessRoleBindings", "trustedAccessRoleBindings"), + resourceids.UserSpecifiedSegment("trustedAccessRoleBindingName", "trustedAccessRoleBindingName"), + } +} + +// String returns a human-readable description of this Trusted Access Role Binding ID +func (id TrustedAccessRoleBindingId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Managed Cluster Name: %q", id.ManagedClusterName), + fmt.Sprintf("Trusted Access Role Binding Name: %q", id.TrustedAccessRoleBindingName), + } + return fmt.Sprintf("Trusted Access Role Binding (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/id_trustedaccessrolebinding_test.go b/resource-manager/containerservice/2025-05-01/trustedaccess/id_trustedaccessrolebinding_test.go new file mode 100644 index 00000000000..bea546caaef --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/id_trustedaccessrolebinding_test.go @@ -0,0 +1,327 @@ +package trustedaccess + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &TrustedAccessRoleBindingId{} + +func TestNewTrustedAccessRoleBindingID(t *testing.T) { + id := NewTrustedAccessRoleBindingID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "trustedAccessRoleBindingName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.ManagedClusterName != "managedClusterName" { + t.Fatalf("Expected %q but got %q for Segment 'ManagedClusterName'", id.ManagedClusterName, "managedClusterName") + } + + if id.TrustedAccessRoleBindingName != "trustedAccessRoleBindingName" { + t.Fatalf("Expected %q but got %q for Segment 'TrustedAccessRoleBindingName'", id.TrustedAccessRoleBindingName, "trustedAccessRoleBindingName") + } +} + +func TestFormatTrustedAccessRoleBindingID(t *testing.T) { + actual := NewTrustedAccessRoleBindingID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedClusterName", "trustedAccessRoleBindingName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/trustedAccessRoleBindings/trustedAccessRoleBindingName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseTrustedAccessRoleBindingID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TrustedAccessRoleBindingId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + 
Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/trustedAccessRoleBindings", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/trustedAccessRoleBindings/trustedAccessRoleBindingName", + Expected: &TrustedAccessRoleBindingId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + TrustedAccessRoleBindingName: "trustedAccessRoleBindingName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/trustedAccessRoleBindings/trustedAccessRoleBindingName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTrustedAccessRoleBindingID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.TrustedAccessRoleBindingName != v.Expected.TrustedAccessRoleBindingName { + t.Fatalf("Expected %q but got %q for TrustedAccessRoleBindingName", v.Expected.TrustedAccessRoleBindingName, actual.TrustedAccessRoleBindingName) + } + + } +} + +func TestParseTrustedAccessRoleBindingIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TrustedAccessRoleBindingId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + 
Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/trustedAccessRoleBindings", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/tRuStEdAcCeSsRoLeBiNdInGs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/trustedAccessRoleBindings/trustedAccessRoleBindingName", + Expected: &TrustedAccessRoleBindingId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedClusterName: "managedClusterName", + TrustedAccessRoleBindingName: "trustedAccessRoleBindingName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.ContainerService/managedClusters/managedClusterName/trustedAccessRoleBindings/trustedAccessRoleBindingName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/tRuStEdAcCeSsRoLeBiNdInGs/tRuStEdAcCeSsRoLeBiNdInGnAmE", + Expected: &TrustedAccessRoleBindingId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + ManagedClusterName: "mAnAgEdClUsTeRnAmE", + TrustedAccessRoleBindingName: "tRuStEdAcCeSsRoLeBiNdInGnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.cOnTaInErSeRvIcE/mAnAgEdClUsTeRs/mAnAgEdClUsTeRnAmE/tRuStEdAcCeSsRoLeBiNdInGs/tRuStEdAcCeSsRoLeBiNdInGnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTrustedAccessRoleBindingIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedClusterName != v.Expected.ManagedClusterName { + t.Fatalf("Expected %q but got %q for ManagedClusterName", v.Expected.ManagedClusterName, actual.ManagedClusterName) + } + + if actual.TrustedAccessRoleBindingName != v.Expected.TrustedAccessRoleBindingName { + t.Fatalf("Expected %q but got %q for TrustedAccessRoleBindingName", v.Expected.TrustedAccessRoleBindingName, actual.TrustedAccessRoleBindingName) + } + + } +} + +func TestSegmentsForTrustedAccessRoleBindingId(t 
*testing.T) { + segments := TrustedAccessRoleBindingId{}.Segments() + if len(segments) == 0 { + t.Fatalf("TrustedAccessRoleBindingId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/method_rolebindingscreateorupdate.go b/resource-manager/containerservice/2025-05-01/trustedaccess/method_rolebindingscreateorupdate.go new file mode 100644 index 00000000000..682a9388eae --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/method_rolebindingscreateorupdate.go @@ -0,0 +1,75 @@ +package trustedaccess + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RoleBindingsCreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *TrustedAccessRoleBinding +} + +// RoleBindingsCreateOrUpdate ... 
+func (c TrustedAccessClient) RoleBindingsCreateOrUpdate(ctx context.Context, id TrustedAccessRoleBindingId, input TrustedAccessRoleBinding) (result RoleBindingsCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// RoleBindingsCreateOrUpdateThenPoll performs RoleBindingsCreateOrUpdate then polls until it's completed +func (c TrustedAccessClient) RoleBindingsCreateOrUpdateThenPoll(ctx context.Context, id TrustedAccessRoleBindingId, input TrustedAccessRoleBinding) error { + result, err := c.RoleBindingsCreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing RoleBindingsCreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after RoleBindingsCreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/method_rolebindingsdelete.go b/resource-manager/containerservice/2025-05-01/trustedaccess/method_rolebindingsdelete.go new file mode 100644 index 00000000000..7107d14f6f5 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/method_rolebindingsdelete.go @@ -0,0 +1,70 @@ +package trustedaccess + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RoleBindingsDeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// RoleBindingsDelete ... +func (c TrustedAccessClient) RoleBindingsDelete(ctx context.Context, id TrustedAccessRoleBindingId) (result RoleBindingsDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// RoleBindingsDeleteThenPoll performs RoleBindingsDelete then polls until it's completed +func (c TrustedAccessClient) RoleBindingsDeleteThenPoll(ctx context.Context, id TrustedAccessRoleBindingId) error { + result, err := c.RoleBindingsDelete(ctx, id) + if err != nil { + return fmt.Errorf("performing RoleBindingsDelete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after RoleBindingsDelete: %+v", err) + } + + return nil +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/method_rolebindingsget.go b/resource-manager/containerservice/2025-05-01/trustedaccess/method_rolebindingsget.go new file mode 100644 index 00000000000..d70aa4cbfb4 --- /dev/null +++ 
b/resource-manager/containerservice/2025-05-01/trustedaccess/method_rolebindingsget.go @@ -0,0 +1,53 @@ +package trustedaccess + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RoleBindingsGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *TrustedAccessRoleBinding +} + +// RoleBindingsGet ... +func (c TrustedAccessClient) RoleBindingsGet(ctx context.Context, id TrustedAccessRoleBindingId) (result RoleBindingsGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model TrustedAccessRoleBinding + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/method_rolebindingslist.go b/resource-manager/containerservice/2025-05-01/trustedaccess/method_rolebindingslist.go new file mode 100644 index 00000000000..8c24b744a5d --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/method_rolebindingslist.go @@ -0,0 +1,106 @@ +package trustedaccess + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RoleBindingsListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]TrustedAccessRoleBinding +} + +type RoleBindingsListCompleteResult struct { + LatestHttpResponse *http.Response + Items []TrustedAccessRoleBinding +} + +type RoleBindingsListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *RoleBindingsListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// RoleBindingsList ... +func (c TrustedAccessClient) RoleBindingsList(ctx context.Context, id commonids.KubernetesClusterId) (result RoleBindingsListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &RoleBindingsListCustomPager{}, + Path: fmt.Sprintf("%s/trustedAccessRoleBindings", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]TrustedAccessRoleBinding `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// RoleBindingsListComplete retrieves all the results into a single object +func (c TrustedAccessClient) RoleBindingsListComplete(ctx context.Context, id commonids.KubernetesClusterId) (RoleBindingsListCompleteResult, error) { + return c.RoleBindingsListCompleteMatchingPredicate(ctx, id, TrustedAccessRoleBindingOperationPredicate{}) +} + +// RoleBindingsListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c TrustedAccessClient) 
RoleBindingsListCompleteMatchingPredicate(ctx context.Context, id commonids.KubernetesClusterId, predicate TrustedAccessRoleBindingOperationPredicate) (result RoleBindingsListCompleteResult, err error) { + items := make([]TrustedAccessRoleBinding, 0) + + resp, err := c.RoleBindingsList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = RoleBindingsListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/method_roleslist.go b/resource-manager/containerservice/2025-05-01/trustedaccess/method_roleslist.go new file mode 100644 index 00000000000..211cbf901bc --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/method_roleslist.go @@ -0,0 +1,105 @@ +package trustedaccess + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RolesListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]TrustedAccessRole +} + +type RolesListCompleteResult struct { + LatestHttpResponse *http.Response + Items []TrustedAccessRole +} + +type RolesListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *RolesListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// RolesList ... 
+func (c TrustedAccessClient) RolesList(ctx context.Context, id LocationId) (result RolesListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &RolesListCustomPager{}, + Path: fmt.Sprintf("%s/trustedAccessRoles", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]TrustedAccessRole `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// RolesListComplete retrieves all the results into a single object +func (c TrustedAccessClient) RolesListComplete(ctx context.Context, id LocationId) (RolesListCompleteResult, error) { + return c.RolesListCompleteMatchingPredicate(ctx, id, TrustedAccessRoleOperationPredicate{}) +} + +// RolesListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c TrustedAccessClient) RolesListCompleteMatchingPredicate(ctx context.Context, id LocationId, predicate TrustedAccessRoleOperationPredicate) (result RolesListCompleteResult, err error) { + items := make([]TrustedAccessRole, 0) + + resp, err := c.RolesList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = RolesListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/model_trustedaccessrole.go 
b/resource-manager/containerservice/2025-05-01/trustedaccess/model_trustedaccessrole.go new file mode 100644 index 00000000000..a24a7344c12 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/model_trustedaccessrole.go @@ -0,0 +1,10 @@ +package trustedaccess + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TrustedAccessRole struct { + Name *string `json:"name,omitempty"` + Rules *[]TrustedAccessRoleRule `json:"rules,omitempty"` + SourceResourceType *string `json:"sourceResourceType,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/model_trustedaccessrolebinding.go b/resource-manager/containerservice/2025-05-01/trustedaccess/model_trustedaccessrolebinding.go new file mode 100644 index 00000000000..36c110999f6 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/model_trustedaccessrolebinding.go @@ -0,0 +1,16 @@ +package trustedaccess + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type TrustedAccessRoleBinding struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties TrustedAccessRoleBindingProperties `json:"properties"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/model_trustedaccessrolebindingproperties.go b/resource-manager/containerservice/2025-05-01/trustedaccess/model_trustedaccessrolebindingproperties.go new file mode 100644 index 00000000000..b432519d7cc --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/model_trustedaccessrolebindingproperties.go @@ -0,0 +1,10 @@ +package trustedaccess + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TrustedAccessRoleBindingProperties struct { + ProvisioningState *TrustedAccessRoleBindingProvisioningState `json:"provisioningState,omitempty"` + Roles []string `json:"roles"` + SourceResourceId string `json:"sourceResourceId"` +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/model_trustedaccessrolerule.go b/resource-manager/containerservice/2025-05-01/trustedaccess/model_trustedaccessrolerule.go new file mode 100644 index 00000000000..808af096d0a --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/model_trustedaccessrolerule.go @@ -0,0 +1,12 @@ +package trustedaccess + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type TrustedAccessRoleRule struct { + ApiGroups *[]string `json:"apiGroups,omitempty"` + NonResourceURLs *[]string `json:"nonResourceURLs,omitempty"` + ResourceNames *[]string `json:"resourceNames,omitempty"` + Resources *[]string `json:"resources,omitempty"` + Verbs *[]string `json:"verbs,omitempty"` +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/predicates.go b/resource-manager/containerservice/2025-05-01/trustedaccess/predicates.go new file mode 100644 index 00000000000..6bab96a78b7 --- /dev/null +++ b/resource-manager/containerservice/2025-05-01/trustedaccess/predicates.go @@ -0,0 +1,45 @@ +package trustedaccess + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TrustedAccessRoleOperationPredicate struct { + Name *string + SourceResourceType *string +} + +func (p TrustedAccessRoleOperationPredicate) Matches(input TrustedAccessRole) bool { + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.SourceResourceType != nil && (input.SourceResourceType == nil || *p.SourceResourceType != *input.SourceResourceType) { + return false + } + + return true +} + +type TrustedAccessRoleBindingOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p TrustedAccessRoleBindingOperationPredicate) Matches(input TrustedAccessRoleBinding) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/containerservice/2025-05-01/trustedaccess/version.go b/resource-manager/containerservice/2025-05-01/trustedaccess/version.go new file mode 100644 index 00000000000..b5b0b6cd76e --- /dev/null +++ 
b/resource-manager/containerservice/2025-05-01/trustedaccess/version.go @@ -0,0 +1,10 @@ +package trustedaccess + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-05-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/trustedaccess/2025-05-01" +} diff --git a/resource-manager/datamigration/2025-06-30/client.go b/resource-manager/datamigration/2025-06-30/client.go new file mode 100644 index 00000000000..97d0b10c5ed --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/client.go @@ -0,0 +1,172 @@ +package v2025_06_30 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/customoperation" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/databasemigrations" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/delete" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/fieresource" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/fileresource" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/get" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/migrationservices" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/patch" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/post" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/projectresource" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/put" + 
"github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/serviceresource" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/servicetaskresource" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/sqlmigrationservices" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/standardoperation" + "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/taskresource" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +type Client struct { + CustomOperation *customoperation.CustomOperationClient + DELETE *delete.DELETEClient + DatabaseMigrations *databasemigrations.DatabaseMigrationsClient + DatabaseMigrationsSqlVM *databasemigrationssqlvm.DatabaseMigrationsSqlVMClient + FieResource *fieresource.FieResourceClient + FileResource *fileresource.FileResourceClient + GET *get.GETClient + MigrationServices *migrationservices.MigrationServicesClient + PATCH *patch.PATCHClient + POST *post.POSTClient + PUT *put.PUTClient + ProjectResource *projectresource.ProjectResourceClient + ServiceResource *serviceresource.ServiceResourceClient + ServiceTaskResource *servicetaskresource.ServiceTaskResourceClient + SqlMigrationServices *sqlmigrationservices.SqlMigrationServicesClient + StandardOperation *standardoperation.StandardOperationClient + TaskResource *taskresource.TaskResourceClient +} + +func NewClientWithBaseURI(sdkApi sdkEnv.Api, configureFunc func(c *resourcemanager.Client)) (*Client, error) { + customOperationClient, err := customoperation.NewCustomOperationClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building CustomOperation client: %+v", err) + } + configureFunc(customOperationClient.Client) + + dELETEClient, err := delete.NewDELETEClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building DELETE client: %+v", err) + } + 
configureFunc(dELETEClient.Client) + + databaseMigrationsClient, err := databasemigrations.NewDatabaseMigrationsClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building DatabaseMigrations client: %+v", err) + } + configureFunc(databaseMigrationsClient.Client) + + databaseMigrationsSqlVMClient, err := databasemigrationssqlvm.NewDatabaseMigrationsSqlVMClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building DatabaseMigrationsSqlVM client: %+v", err) + } + configureFunc(databaseMigrationsSqlVMClient.Client) + + fieResourceClient, err := fieresource.NewFieResourceClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building FieResource client: %+v", err) + } + configureFunc(fieResourceClient.Client) + + fileResourceClient, err := fileresource.NewFileResourceClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building FileResource client: %+v", err) + } + configureFunc(fileResourceClient.Client) + + gETClient, err := get.NewGETClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building GET client: %+v", err) + } + configureFunc(gETClient.Client) + + migrationServicesClient, err := migrationservices.NewMigrationServicesClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building MigrationServices client: %+v", err) + } + configureFunc(migrationServicesClient.Client) + + pATCHClient, err := patch.NewPATCHClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building PATCH client: %+v", err) + } + configureFunc(pATCHClient.Client) + + pOSTClient, err := post.NewPOSTClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building POST client: %+v", err) + } + configureFunc(pOSTClient.Client) + + pUTClient, err := put.NewPUTClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building PUT client: %+v", err) + } + configureFunc(pUTClient.Client) + + projectResourceClient, err := 
projectresource.NewProjectResourceClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building ProjectResource client: %+v", err) + } + configureFunc(projectResourceClient.Client) + + serviceResourceClient, err := serviceresource.NewServiceResourceClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building ServiceResource client: %+v", err) + } + configureFunc(serviceResourceClient.Client) + + serviceTaskResourceClient, err := servicetaskresource.NewServiceTaskResourceClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building ServiceTaskResource client: %+v", err) + } + configureFunc(serviceTaskResourceClient.Client) + + sqlMigrationServicesClient, err := sqlmigrationservices.NewSqlMigrationServicesClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building SqlMigrationServices client: %+v", err) + } + configureFunc(sqlMigrationServicesClient.Client) + + standardOperationClient, err := standardoperation.NewStandardOperationClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building StandardOperation client: %+v", err) + } + configureFunc(standardOperationClient.Client) + + taskResourceClient, err := taskresource.NewTaskResourceClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building TaskResource client: %+v", err) + } + configureFunc(taskResourceClient.Client) + + return &Client{ + CustomOperation: customOperationClient, + DELETE: dELETEClient, + DatabaseMigrations: databaseMigrationsClient, + DatabaseMigrationsSqlVM: databaseMigrationsSqlVMClient, + FieResource: fieResourceClient, + FileResource: fileResourceClient, + GET: gETClient, + MigrationServices: migrationServicesClient, + PATCH: pATCHClient, + POST: pOSTClient, + PUT: pUTClient, + ProjectResource: projectResourceClient, + ServiceResource: serviceResourceClient, + ServiceTaskResource: serviceTaskResourceClient, + SqlMigrationServices: sqlMigrationServicesClient, + StandardOperation: 
standardOperationClient, + TaskResource: taskResourceClient, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/README.md b/resource-manager/datamigration/2025-06-30/customoperation/README.md new file mode 100644 index 00000000000..811fbd3706f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/README.md @@ -0,0 +1,134 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/customoperation` Documentation + +The `customoperation` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/customoperation" +``` + + +### Client Initialization + +```go +client := customoperation.NewCustomOperationClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `CustomOperationClient.ServiceTasksCancel` + +```go +ctx := context.TODO() +id := customoperation.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +read, err := client.ServiceTasksCancel(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `CustomOperationClient.ServicesCheckChildrenNameAvailability` + +```go +ctx := context.TODO() +id := customoperation.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +payload := customoperation.NameAvailabilityRequest{ + // ... 
+} + + +read, err := client.ServicesCheckChildrenNameAvailability(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `CustomOperationClient.ServicesCheckStatus` + +```go +ctx := context.TODO() +id := customoperation.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +read, err := client.ServicesCheckStatus(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `CustomOperationClient.ServicesStart` + +```go +ctx := context.TODO() +id := customoperation.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +if err := client.ServicesStartThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `CustomOperationClient.ServicesStop` + +```go +ctx := context.TODO() +id := customoperation.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +if err := client.ServicesStopThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `CustomOperationClient.TasksCancel` + +```go +ctx := context.TODO() +id := customoperation.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +read, err := client.TasksCancel(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `CustomOperationClient.TasksCommand` + +```go +ctx := context.TODO() +id := customoperation.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +payload := customoperation.CommandProperties{ + // ... 
+} + + +read, err := client.TasksCommand(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/datamigration/2025-06-30/customoperation/client.go b/resource-manager/datamigration/2025-06-30/customoperation/client.go new file mode 100644 index 00000000000..dd76465d0b7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/client.go @@ -0,0 +1,26 @@ +package customoperation + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CustomOperationClient struct { + Client *resourcemanager.Client +} + +func NewCustomOperationClientWithBaseURI(sdkApi sdkEnv.Api) (*CustomOperationClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "customoperation", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating CustomOperationClient: %+v", err) + } + + return &CustomOperationClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/constants.go b/resource-manager/datamigration/2025-06-30/customoperation/constants.go new file mode 100644 index 00000000000..9b710e2ada2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/constants.go @@ -0,0 +1,2146 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AuthenticationType string + +const ( + AuthenticationTypeActiveDirectoryIntegrated AuthenticationType = "ActiveDirectoryIntegrated" + AuthenticationTypeActiveDirectoryPassword AuthenticationType = "ActiveDirectoryPassword" + AuthenticationTypeNone AuthenticationType = "None" + AuthenticationTypeSqlAuthentication AuthenticationType = "SqlAuthentication" + AuthenticationTypeWindowsAuthentication AuthenticationType = "WindowsAuthentication" +) + +func PossibleValuesForAuthenticationType() []string { + return []string{ + string(AuthenticationTypeActiveDirectoryIntegrated), + string(AuthenticationTypeActiveDirectoryPassword), + string(AuthenticationTypeNone), + string(AuthenticationTypeSqlAuthentication), + string(AuthenticationTypeWindowsAuthentication), + } +} + +func (s *AuthenticationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthenticationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthenticationType(input string) (*AuthenticationType, error) { + vals := map[string]AuthenticationType{ + "activedirectoryintegrated": AuthenticationTypeActiveDirectoryIntegrated, + "activedirectorypassword": AuthenticationTypeActiveDirectoryPassword, + "none": AuthenticationTypeNone, + "sqlauthentication": AuthenticationTypeSqlAuthentication, + "windowsauthentication": AuthenticationTypeWindowsAuthentication, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthenticationType(input) + return &out, nil +} + +type BackupFileStatus string + +const ( + BackupFileStatusArrived BackupFileStatus = "Arrived" + BackupFileStatusCancelled BackupFileStatus = "Cancelled" + BackupFileStatusQueued BackupFileStatus = "Queued" + BackupFileStatusRestored 
BackupFileStatus = "Restored" + BackupFileStatusRestoring BackupFileStatus = "Restoring" + BackupFileStatusUploaded BackupFileStatus = "Uploaded" + BackupFileStatusUploading BackupFileStatus = "Uploading" +) + +func PossibleValuesForBackupFileStatus() []string { + return []string{ + string(BackupFileStatusArrived), + string(BackupFileStatusCancelled), + string(BackupFileStatusQueued), + string(BackupFileStatusRestored), + string(BackupFileStatusRestoring), + string(BackupFileStatusUploaded), + string(BackupFileStatusUploading), + } +} + +func (s *BackupFileStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupFileStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupFileStatus(input string) (*BackupFileStatus, error) { + vals := map[string]BackupFileStatus{ + "arrived": BackupFileStatusArrived, + "cancelled": BackupFileStatusCancelled, + "queued": BackupFileStatusQueued, + "restored": BackupFileStatusRestored, + "restoring": BackupFileStatusRestoring, + "uploaded": BackupFileStatusUploaded, + "uploading": BackupFileStatusUploading, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupFileStatus(input) + return &out, nil +} + +type BackupMode string + +const ( + BackupModeCreateBackup BackupMode = "CreateBackup" + BackupModeExistingBackup BackupMode = "ExistingBackup" +) + +func PossibleValuesForBackupMode() []string { + return []string{ + string(BackupModeCreateBackup), + string(BackupModeExistingBackup), + } +} + +func (s *BackupMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupMode(decoded) + 
if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupMode(input string) (*BackupMode, error) { + vals := map[string]BackupMode{ + "createbackup": BackupModeCreateBackup, + "existingbackup": BackupModeExistingBackup, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupMode(input) + return &out, nil +} + +type BackupType string + +const ( + BackupTypeDatabase BackupType = "Database" + BackupTypeDifferentialDatabase BackupType = "DifferentialDatabase" + BackupTypeDifferentialFile BackupType = "DifferentialFile" + BackupTypeDifferentialPartial BackupType = "DifferentialPartial" + BackupTypeFile BackupType = "File" + BackupTypePartial BackupType = "Partial" + BackupTypeTransactionLog BackupType = "TransactionLog" +) + +func PossibleValuesForBackupType() []string { + return []string{ + string(BackupTypeDatabase), + string(BackupTypeDifferentialDatabase), + string(BackupTypeDifferentialFile), + string(BackupTypeDifferentialPartial), + string(BackupTypeFile), + string(BackupTypePartial), + string(BackupTypeTransactionLog), + } +} + +func (s *BackupType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupType(input string) (*BackupType, error) { + vals := map[string]BackupType{ + "database": BackupTypeDatabase, + "differentialdatabase": BackupTypeDifferentialDatabase, + "differentialfile": BackupTypeDifferentialFile, + "differentialpartial": BackupTypeDifferentialPartial, + "file": BackupTypeFile, + "partial": BackupTypePartial, + "transactionlog": BackupTypeTransactionLog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return 
&v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupType(input) + return &out, nil +} + +type CommandState string + +const ( + CommandStateAccepted CommandState = "Accepted" + CommandStateFailed CommandState = "Failed" + CommandStateRunning CommandState = "Running" + CommandStateSucceeded CommandState = "Succeeded" + CommandStateUnknown CommandState = "Unknown" +) + +func PossibleValuesForCommandState() []string { + return []string{ + string(CommandStateAccepted), + string(CommandStateFailed), + string(CommandStateRunning), + string(CommandStateSucceeded), + string(CommandStateUnknown), + } +} + +func (s *CommandState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCommandState(input string) (*CommandState, error) { + vals := map[string]CommandState{ + "accepted": CommandStateAccepted, + "failed": CommandStateFailed, + "running": CommandStateRunning, + "succeeded": CommandStateSucceeded, + "unknown": CommandStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CommandState(input) + return &out, nil +} + +type CommandType string + +const ( + CommandTypeCancel CommandType = "cancel" + CommandTypeFinish CommandType = "finish" + CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete CommandType = "Migrate.SqlServer.AzureDbSqlMi.Complete" + CommandTypeMigratePointSyncPointCompletePointDatabase CommandType = "Migrate.Sync.Complete.Database" + CommandTypeRestart CommandType = "restart" +) + +func PossibleValuesForCommandType() []string { + return []string{ + string(CommandTypeCancel), + string(CommandTypeFinish), + 
string(CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete), + string(CommandTypeMigratePointSyncPointCompletePointDatabase), + string(CommandTypeRestart), + } +} + +func (s *CommandType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCommandType(input string) (*CommandType, error) { + vals := map[string]CommandType{ + "cancel": CommandTypeCancel, + "finish": CommandTypeFinish, + "migrate.sqlserver.azuredbsqlmi.complete": CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete, + "migrate.sync.complete.database": CommandTypeMigratePointSyncPointCompletePointDatabase, + "restart": CommandTypeRestart, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CommandType(input) + return &out, nil +} + +type DatabaseCompatLevel string + +const ( + DatabaseCompatLevelCompatLevelEightZero DatabaseCompatLevel = "CompatLevel80" + DatabaseCompatLevelCompatLevelNineZero DatabaseCompatLevel = "CompatLevel90" + DatabaseCompatLevelCompatLevelOneFourZero DatabaseCompatLevel = "CompatLevel140" + DatabaseCompatLevelCompatLevelOneHundred DatabaseCompatLevel = "CompatLevel100" + DatabaseCompatLevelCompatLevelOneOneZero DatabaseCompatLevel = "CompatLevel110" + DatabaseCompatLevelCompatLevelOneThreeZero DatabaseCompatLevel = "CompatLevel130" + DatabaseCompatLevelCompatLevelOneTwoZero DatabaseCompatLevel = "CompatLevel120" +) + +func PossibleValuesForDatabaseCompatLevel() []string { + return []string{ + string(DatabaseCompatLevelCompatLevelEightZero), + string(DatabaseCompatLevelCompatLevelNineZero), + string(DatabaseCompatLevelCompatLevelOneFourZero), + string(DatabaseCompatLevelCompatLevelOneHundred), + 
string(DatabaseCompatLevelCompatLevelOneOneZero), + string(DatabaseCompatLevelCompatLevelOneThreeZero), + string(DatabaseCompatLevelCompatLevelOneTwoZero), + } +} + +func (s *DatabaseCompatLevel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseCompatLevel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseCompatLevel(input string) (*DatabaseCompatLevel, error) { + vals := map[string]DatabaseCompatLevel{ + "compatlevel80": DatabaseCompatLevelCompatLevelEightZero, + "compatlevel90": DatabaseCompatLevelCompatLevelNineZero, + "compatlevel140": DatabaseCompatLevelCompatLevelOneFourZero, + "compatlevel100": DatabaseCompatLevelCompatLevelOneHundred, + "compatlevel110": DatabaseCompatLevelCompatLevelOneOneZero, + "compatlevel130": DatabaseCompatLevelCompatLevelOneThreeZero, + "compatlevel120": DatabaseCompatLevelCompatLevelOneTwoZero, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseCompatLevel(input) + return &out, nil +} + +type DatabaseFileType string + +const ( + DatabaseFileTypeFilestream DatabaseFileType = "Filestream" + DatabaseFileTypeFulltext DatabaseFileType = "Fulltext" + DatabaseFileTypeLog DatabaseFileType = "Log" + DatabaseFileTypeNotSupported DatabaseFileType = "NotSupported" + DatabaseFileTypeRows DatabaseFileType = "Rows" +) + +func PossibleValuesForDatabaseFileType() []string { + return []string{ + string(DatabaseFileTypeFilestream), + string(DatabaseFileTypeFulltext), + string(DatabaseFileTypeLog), + string(DatabaseFileTypeNotSupported), + string(DatabaseFileTypeRows), + } +} + +func (s *DatabaseFileType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + 
return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseFileType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseFileType(input string) (*DatabaseFileType, error) { + vals := map[string]DatabaseFileType{ + "filestream": DatabaseFileTypeFilestream, + "fulltext": DatabaseFileTypeFulltext, + "log": DatabaseFileTypeLog, + "notsupported": DatabaseFileTypeNotSupported, + "rows": DatabaseFileTypeRows, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseFileType(input) + return &out, nil +} + +type DatabaseMigrationStage string + +const ( + DatabaseMigrationStageBackup DatabaseMigrationStage = "Backup" + DatabaseMigrationStageCompleted DatabaseMigrationStage = "Completed" + DatabaseMigrationStageFileCopy DatabaseMigrationStage = "FileCopy" + DatabaseMigrationStageInitialize DatabaseMigrationStage = "Initialize" + DatabaseMigrationStageNone DatabaseMigrationStage = "None" + DatabaseMigrationStageRestore DatabaseMigrationStage = "Restore" +) + +func PossibleValuesForDatabaseMigrationStage() []string { + return []string{ + string(DatabaseMigrationStageBackup), + string(DatabaseMigrationStageCompleted), + string(DatabaseMigrationStageFileCopy), + string(DatabaseMigrationStageInitialize), + string(DatabaseMigrationStageNone), + string(DatabaseMigrationStageRestore), + } +} + +func (s *DatabaseMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationStage(input string) (*DatabaseMigrationStage, error) { + vals := map[string]DatabaseMigrationStage{ + "backup": 
DatabaseMigrationStageBackup, + "completed": DatabaseMigrationStageCompleted, + "filecopy": DatabaseMigrationStageFileCopy, + "initialize": DatabaseMigrationStageInitialize, + "none": DatabaseMigrationStageNone, + "restore": DatabaseMigrationStageRestore, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseMigrationStage(input) + return &out, nil +} + +type DatabaseMigrationState string + +const ( + DatabaseMigrationStateCANCELLED DatabaseMigrationState = "CANCELLED" + DatabaseMigrationStateCOMPLETED DatabaseMigrationState = "COMPLETED" + DatabaseMigrationStateCUTOVERSTART DatabaseMigrationState = "CUTOVER_START" + DatabaseMigrationStateFAILED DatabaseMigrationState = "FAILED" + DatabaseMigrationStateFULLBACKUPUPLOADSTART DatabaseMigrationState = "FULL_BACKUP_UPLOAD_START" + DatabaseMigrationStateINITIAL DatabaseMigrationState = "INITIAL" + DatabaseMigrationStateLOGSHIPPINGSTART DatabaseMigrationState = "LOG_SHIPPING_START" + DatabaseMigrationStatePOSTCUTOVERCOMPLETE DatabaseMigrationState = "POST_CUTOVER_COMPLETE" + DatabaseMigrationStateUNDEFINED DatabaseMigrationState = "UNDEFINED" + DatabaseMigrationStateUPLOADLOGFILESSTART DatabaseMigrationState = "UPLOAD_LOG_FILES_START" +) + +func PossibleValuesForDatabaseMigrationState() []string { + return []string{ + string(DatabaseMigrationStateCANCELLED), + string(DatabaseMigrationStateCOMPLETED), + string(DatabaseMigrationStateCUTOVERSTART), + string(DatabaseMigrationStateFAILED), + string(DatabaseMigrationStateFULLBACKUPUPLOADSTART), + string(DatabaseMigrationStateINITIAL), + string(DatabaseMigrationStateLOGSHIPPINGSTART), + string(DatabaseMigrationStatePOSTCUTOVERCOMPLETE), + string(DatabaseMigrationStateUNDEFINED), + string(DatabaseMigrationStateUPLOADLOGFILESSTART), + } +} + +func (s *DatabaseMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); 
err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationState(input string) (*DatabaseMigrationState, error) { + vals := map[string]DatabaseMigrationState{ + "cancelled": DatabaseMigrationStateCANCELLED, + "completed": DatabaseMigrationStateCOMPLETED, + "cutover_start": DatabaseMigrationStateCUTOVERSTART, + "failed": DatabaseMigrationStateFAILED, + "full_backup_upload_start": DatabaseMigrationStateFULLBACKUPUPLOADSTART, + "initial": DatabaseMigrationStateINITIAL, + "log_shipping_start": DatabaseMigrationStateLOGSHIPPINGSTART, + "post_cutover_complete": DatabaseMigrationStatePOSTCUTOVERCOMPLETE, + "undefined": DatabaseMigrationStateUNDEFINED, + "upload_log_files_start": DatabaseMigrationStateUPLOADLOGFILESSTART, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseMigrationState(input) + return &out, nil +} + +type DatabaseState string + +const ( + DatabaseStateCopying DatabaseState = "Copying" + DatabaseStateEmergency DatabaseState = "Emergency" + DatabaseStateOffline DatabaseState = "Offline" + DatabaseStateOfflineSecondary DatabaseState = "OfflineSecondary" + DatabaseStateOnline DatabaseState = "Online" + DatabaseStateRecovering DatabaseState = "Recovering" + DatabaseStateRecoveryPending DatabaseState = "RecoveryPending" + DatabaseStateRestoring DatabaseState = "Restoring" + DatabaseStateSuspect DatabaseState = "Suspect" +) + +func PossibleValuesForDatabaseState() []string { + return []string{ + string(DatabaseStateCopying), + string(DatabaseStateEmergency), + string(DatabaseStateOffline), + string(DatabaseStateOfflineSecondary), + string(DatabaseStateOnline), + string(DatabaseStateRecovering), + string(DatabaseStateRecoveryPending), + string(DatabaseStateRestoring), + 
string(DatabaseStateSuspect), + } +} + +func (s *DatabaseState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseState(input string) (*DatabaseState, error) { + vals := map[string]DatabaseState{ + "copying": DatabaseStateCopying, + "emergency": DatabaseStateEmergency, + "offline": DatabaseStateOffline, + "offlinesecondary": DatabaseStateOfflineSecondary, + "online": DatabaseStateOnline, + "recovering": DatabaseStateRecovering, + "recoverypending": DatabaseStateRecoveryPending, + "restoring": DatabaseStateRestoring, + "suspect": DatabaseStateSuspect, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseState(input) + return &out, nil +} + +type LoginMigrationStage string + +const ( + LoginMigrationStageAssignRoleMembership LoginMigrationStage = "AssignRoleMembership" + LoginMigrationStageAssignRoleOwnership LoginMigrationStage = "AssignRoleOwnership" + LoginMigrationStageCompleted LoginMigrationStage = "Completed" + LoginMigrationStageEstablishObjectPermissions LoginMigrationStage = "EstablishObjectPermissions" + LoginMigrationStageEstablishServerPermissions LoginMigrationStage = "EstablishServerPermissions" + LoginMigrationStageEstablishUserMapping LoginMigrationStage = "EstablishUserMapping" + LoginMigrationStageInitialize LoginMigrationStage = "Initialize" + LoginMigrationStageLoginMigration LoginMigrationStage = "LoginMigration" + LoginMigrationStageNone LoginMigrationStage = "None" +) + +func PossibleValuesForLoginMigrationStage() []string { + return []string{ + string(LoginMigrationStageAssignRoleMembership), + string(LoginMigrationStageAssignRoleOwnership), + 
string(LoginMigrationStageCompleted), + string(LoginMigrationStageEstablishObjectPermissions), + string(LoginMigrationStageEstablishServerPermissions), + string(LoginMigrationStageEstablishUserMapping), + string(LoginMigrationStageInitialize), + string(LoginMigrationStageLoginMigration), + string(LoginMigrationStageNone), + } +} + +func (s *LoginMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLoginMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLoginMigrationStage(input string) (*LoginMigrationStage, error) { + vals := map[string]LoginMigrationStage{ + "assignrolemembership": LoginMigrationStageAssignRoleMembership, + "assignroleownership": LoginMigrationStageAssignRoleOwnership, + "completed": LoginMigrationStageCompleted, + "establishobjectpermissions": LoginMigrationStageEstablishObjectPermissions, + "establishserverpermissions": LoginMigrationStageEstablishServerPermissions, + "establishusermapping": LoginMigrationStageEstablishUserMapping, + "initialize": LoginMigrationStageInitialize, + "loginmigration": LoginMigrationStageLoginMigration, + "none": LoginMigrationStageNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LoginMigrationStage(input) + return &out, nil +} + +type LoginType string + +const ( + LoginTypeAsymmetricKey LoginType = "AsymmetricKey" + LoginTypeCertificate LoginType = "Certificate" + LoginTypeExternalGroup LoginType = "ExternalGroup" + LoginTypeExternalUser LoginType = "ExternalUser" + LoginTypeSqlLogin LoginType = "SqlLogin" + LoginTypeWindowsGroup LoginType = "WindowsGroup" + LoginTypeWindowsUser LoginType = "WindowsUser" +) + +func PossibleValuesForLoginType() []string { + return []string{ + 
string(LoginTypeAsymmetricKey), + string(LoginTypeCertificate), + string(LoginTypeExternalGroup), + string(LoginTypeExternalUser), + string(LoginTypeSqlLogin), + string(LoginTypeWindowsGroup), + string(LoginTypeWindowsUser), + } +} + +func (s *LoginType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLoginType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLoginType(input string) (*LoginType, error) { + vals := map[string]LoginType{ + "asymmetrickey": LoginTypeAsymmetricKey, + "certificate": LoginTypeCertificate, + "externalgroup": LoginTypeExternalGroup, + "externaluser": LoginTypeExternalUser, + "sqllogin": LoginTypeSqlLogin, + "windowsgroup": LoginTypeWindowsGroup, + "windowsuser": LoginTypeWindowsUser, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LoginType(input) + return &out, nil +} + +type MigrationState string + +const ( + MigrationStateCompleted MigrationState = "Completed" + MigrationStateFailed MigrationState = "Failed" + MigrationStateInProgress MigrationState = "InProgress" + MigrationStateNone MigrationState = "None" + MigrationStateSkipped MigrationState = "Skipped" + MigrationStateStopped MigrationState = "Stopped" + MigrationStateWarning MigrationState = "Warning" +) + +func PossibleValuesForMigrationState() []string { + return []string{ + string(MigrationStateCompleted), + string(MigrationStateFailed), + string(MigrationStateInProgress), + string(MigrationStateNone), + string(MigrationStateSkipped), + string(MigrationStateStopped), + string(MigrationStateWarning), + } +} + +func (s *MigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return 
fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMigrationState(input string) (*MigrationState, error) { + vals := map[string]MigrationState{ + "completed": MigrationStateCompleted, + "failed": MigrationStateFailed, + "inprogress": MigrationStateInProgress, + "none": MigrationStateNone, + "skipped": MigrationStateSkipped, + "stopped": MigrationStateStopped, + "warning": MigrationStateWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MigrationState(input) + return &out, nil +} + +type MigrationStatus string + +const ( + MigrationStatusCompleted MigrationStatus = "Completed" + MigrationStatusCompletedWithWarnings MigrationStatus = "CompletedWithWarnings" + MigrationStatusConfigured MigrationStatus = "Configured" + MigrationStatusConnecting MigrationStatus = "Connecting" + MigrationStatusDefault MigrationStatus = "Default" + MigrationStatusError MigrationStatus = "Error" + MigrationStatusRunning MigrationStatus = "Running" + MigrationStatusSelectLogins MigrationStatus = "SelectLogins" + MigrationStatusSourceAndTargetSelected MigrationStatus = "SourceAndTargetSelected" + MigrationStatusStopped MigrationStatus = "Stopped" +) + +func PossibleValuesForMigrationStatus() []string { + return []string{ + string(MigrationStatusCompleted), + string(MigrationStatusCompletedWithWarnings), + string(MigrationStatusConfigured), + string(MigrationStatusConnecting), + string(MigrationStatusDefault), + string(MigrationStatusError), + string(MigrationStatusRunning), + string(MigrationStatusSelectLogins), + string(MigrationStatusSourceAndTargetSelected), + string(MigrationStatusStopped), + } +} + +func (s *MigrationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != 
nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMigrationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMigrationStatus(input string) (*MigrationStatus, error) { + vals := map[string]MigrationStatus{ + "completed": MigrationStatusCompleted, + "completedwithwarnings": MigrationStatusCompletedWithWarnings, + "configured": MigrationStatusConfigured, + "connecting": MigrationStatusConnecting, + "default": MigrationStatusDefault, + "error": MigrationStatusError, + "running": MigrationStatusRunning, + "selectlogins": MigrationStatusSelectLogins, + "sourceandtargetselected": MigrationStatusSourceAndTargetSelected, + "stopped": MigrationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MigrationStatus(input) + return &out, nil +} + +type MongoDbClusterType string + +const ( + MongoDbClusterTypeBlobContainer MongoDbClusterType = "BlobContainer" + MongoDbClusterTypeCosmosDb MongoDbClusterType = "CosmosDb" + MongoDbClusterTypeMongoDb MongoDbClusterType = "MongoDb" +) + +func PossibleValuesForMongoDbClusterType() []string { + return []string{ + string(MongoDbClusterTypeBlobContainer), + string(MongoDbClusterTypeCosmosDb), + string(MongoDbClusterTypeMongoDb), + } +} + +func (s *MongoDbClusterType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbClusterType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbClusterType(input string) (*MongoDbClusterType, error) { + vals := map[string]MongoDbClusterType{ + "blobcontainer": MongoDbClusterTypeBlobContainer, + "cosmosdb": MongoDbClusterTypeCosmosDb, + "mongodb": MongoDbClusterTypeMongoDb, 
+ } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbClusterType(input) + return &out, nil +} + +type MongoDbErrorType string + +const ( + MongoDbErrorTypeError MongoDbErrorType = "Error" + MongoDbErrorTypeValidationError MongoDbErrorType = "ValidationError" + MongoDbErrorTypeWarning MongoDbErrorType = "Warning" +) + +func PossibleValuesForMongoDbErrorType() []string { + return []string{ + string(MongoDbErrorTypeError), + string(MongoDbErrorTypeValidationError), + string(MongoDbErrorTypeWarning), + } +} + +func (s *MongoDbErrorType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbErrorType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbErrorType(input string) (*MongoDbErrorType, error) { + vals := map[string]MongoDbErrorType{ + "error": MongoDbErrorTypeError, + "validationerror": MongoDbErrorTypeValidationError, + "warning": MongoDbErrorTypeWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbErrorType(input) + return &out, nil +} + +type MongoDbMigrationState string + +const ( + MongoDbMigrationStateCanceled MongoDbMigrationState = "Canceled" + MongoDbMigrationStateComplete MongoDbMigrationState = "Complete" + MongoDbMigrationStateCopying MongoDbMigrationState = "Copying" + MongoDbMigrationStateFailed MongoDbMigrationState = "Failed" + MongoDbMigrationStateFinalizing MongoDbMigrationState = "Finalizing" + MongoDbMigrationStateInitialReplay MongoDbMigrationState = "InitialReplay" + MongoDbMigrationStateInitializing MongoDbMigrationState = "Initializing" + MongoDbMigrationStateNotStarted MongoDbMigrationState = "NotStarted" + 
MongoDbMigrationStateReplaying MongoDbMigrationState = "Replaying" + MongoDbMigrationStateRestarting MongoDbMigrationState = "Restarting" + MongoDbMigrationStateValidatingInput MongoDbMigrationState = "ValidatingInput" +) + +func PossibleValuesForMongoDbMigrationState() []string { + return []string{ + string(MongoDbMigrationStateCanceled), + string(MongoDbMigrationStateComplete), + string(MongoDbMigrationStateCopying), + string(MongoDbMigrationStateFailed), + string(MongoDbMigrationStateFinalizing), + string(MongoDbMigrationStateInitialReplay), + string(MongoDbMigrationStateInitializing), + string(MongoDbMigrationStateNotStarted), + string(MongoDbMigrationStateReplaying), + string(MongoDbMigrationStateRestarting), + string(MongoDbMigrationStateValidatingInput), + } +} + +func (s *MongoDbMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbMigrationState(input string) (*MongoDbMigrationState, error) { + vals := map[string]MongoDbMigrationState{ + "canceled": MongoDbMigrationStateCanceled, + "complete": MongoDbMigrationStateComplete, + "copying": MongoDbMigrationStateCopying, + "failed": MongoDbMigrationStateFailed, + "finalizing": MongoDbMigrationStateFinalizing, + "initialreplay": MongoDbMigrationStateInitialReplay, + "initializing": MongoDbMigrationStateInitializing, + "notstarted": MongoDbMigrationStateNotStarted, + "replaying": MongoDbMigrationStateReplaying, + "restarting": MongoDbMigrationStateRestarting, + "validatinginput": MongoDbMigrationStateValidatingInput, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbMigrationState(input) + return &out, nil +} + 
+type MongoDbReplication string + +const ( + MongoDbReplicationContinuous MongoDbReplication = "Continuous" + MongoDbReplicationDisabled MongoDbReplication = "Disabled" + MongoDbReplicationOneTime MongoDbReplication = "OneTime" +) + +func PossibleValuesForMongoDbReplication() []string { + return []string{ + string(MongoDbReplicationContinuous), + string(MongoDbReplicationDisabled), + string(MongoDbReplicationOneTime), + } +} + +func (s *MongoDbReplication) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbReplication(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbReplication(input string) (*MongoDbReplication, error) { + vals := map[string]MongoDbReplication{ + "continuous": MongoDbReplicationContinuous, + "disabled": MongoDbReplicationDisabled, + "onetime": MongoDbReplicationOneTime, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbReplication(input) + return &out, nil +} + +type MongoDbShardKeyOrder string + +const ( + MongoDbShardKeyOrderForward MongoDbShardKeyOrder = "Forward" + MongoDbShardKeyOrderHashed MongoDbShardKeyOrder = "Hashed" + MongoDbShardKeyOrderReverse MongoDbShardKeyOrder = "Reverse" +) + +func PossibleValuesForMongoDbShardKeyOrder() []string { + return []string{ + string(MongoDbShardKeyOrderForward), + string(MongoDbShardKeyOrderHashed), + string(MongoDbShardKeyOrderReverse), + } +} + +func (s *MongoDbShardKeyOrder) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbShardKeyOrder(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + 
return nil +} + +func parseMongoDbShardKeyOrder(input string) (*MongoDbShardKeyOrder, error) { + vals := map[string]MongoDbShardKeyOrder{ + "forward": MongoDbShardKeyOrderForward, + "hashed": MongoDbShardKeyOrderHashed, + "reverse": MongoDbShardKeyOrderReverse, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbShardKeyOrder(input) + return &out, nil +} + +type MySqlTargetPlatformType string + +const ( + MySqlTargetPlatformTypeAzureDbForMySQL MySqlTargetPlatformType = "AzureDbForMySQL" + MySqlTargetPlatformTypeSqlServer MySqlTargetPlatformType = "SqlServer" +) + +func PossibleValuesForMySqlTargetPlatformType() []string { + return []string{ + string(MySqlTargetPlatformTypeAzureDbForMySQL), + string(MySqlTargetPlatformTypeSqlServer), + } +} + +func (s *MySqlTargetPlatformType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMySqlTargetPlatformType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMySqlTargetPlatformType(input string) (*MySqlTargetPlatformType, error) { + vals := map[string]MySqlTargetPlatformType{ + "azuredbformysql": MySqlTargetPlatformTypeAzureDbForMySQL, + "sqlserver": MySqlTargetPlatformTypeSqlServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MySqlTargetPlatformType(input) + return &out, nil +} + +type NameCheckFailureReason string + +const ( + NameCheckFailureReasonAlreadyExists NameCheckFailureReason = "AlreadyExists" + NameCheckFailureReasonInvalid NameCheckFailureReason = "Invalid" +) + +func PossibleValuesForNameCheckFailureReason() []string { + return []string{ + string(NameCheckFailureReasonAlreadyExists), + 
string(NameCheckFailureReasonInvalid), + } +} + +func (s *NameCheckFailureReason) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNameCheckFailureReason(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseNameCheckFailureReason(input string) (*NameCheckFailureReason, error) { + vals := map[string]NameCheckFailureReason{ + "alreadyexists": NameCheckFailureReasonAlreadyExists, + "invalid": NameCheckFailureReasonInvalid, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NameCheckFailureReason(input) + return &out, nil +} + +type ObjectType string + +const ( + ObjectTypeFunction ObjectType = "Function" + ObjectTypeStoredProcedures ObjectType = "StoredProcedures" + ObjectTypeTable ObjectType = "Table" + ObjectTypeUser ObjectType = "User" + ObjectTypeView ObjectType = "View" +) + +func PossibleValuesForObjectType() []string { + return []string{ + string(ObjectTypeFunction), + string(ObjectTypeStoredProcedures), + string(ObjectTypeTable), + string(ObjectTypeUser), + string(ObjectTypeView), + } +} + +func (s *ObjectType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseObjectType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseObjectType(input string) (*ObjectType, error) { + vals := map[string]ObjectType{ + "function": ObjectTypeFunction, + "storedprocedures": ObjectTypeStoredProcedures, + "table": ObjectTypeTable, + "user": ObjectTypeUser, + "view": ObjectTypeView, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise 
presume it's an undefined value and best-effort it + out := ObjectType(input) + return &out, nil +} + +type ReplicateMigrationState string + +const ( + ReplicateMigrationStateACTIONREQUIRED ReplicateMigrationState = "ACTION_REQUIRED" + ReplicateMigrationStateCOMPLETE ReplicateMigrationState = "COMPLETE" + ReplicateMigrationStateFAILED ReplicateMigrationState = "FAILED" + ReplicateMigrationStatePENDING ReplicateMigrationState = "PENDING" + ReplicateMigrationStateUNDEFINED ReplicateMigrationState = "UNDEFINED" + ReplicateMigrationStateVALIDATING ReplicateMigrationState = "VALIDATING" +) + +func PossibleValuesForReplicateMigrationState() []string { + return []string{ + string(ReplicateMigrationStateACTIONREQUIRED), + string(ReplicateMigrationStateCOMPLETE), + string(ReplicateMigrationStateFAILED), + string(ReplicateMigrationStatePENDING), + string(ReplicateMigrationStateUNDEFINED), + string(ReplicateMigrationStateVALIDATING), + } +} + +func (s *ReplicateMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseReplicateMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicateMigrationState(input string) (*ReplicateMigrationState, error) { + vals := map[string]ReplicateMigrationState{ + "action_required": ReplicateMigrationStateACTIONREQUIRED, + "complete": ReplicateMigrationStateCOMPLETE, + "failed": ReplicateMigrationStateFAILED, + "pending": ReplicateMigrationStatePENDING, + "undefined": ReplicateMigrationStateUNDEFINED, + "validating": ReplicateMigrationStateVALIDATING, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicateMigrationState(input) + return &out, nil +} + +type ResultType string + +const ( + ResultTypeCollection 
ResultType = "Collection" + ResultTypeDatabase ResultType = "Database" + ResultTypeMigration ResultType = "Migration" +) + +func PossibleValuesForResultType() []string { + return []string{ + string(ResultTypeCollection), + string(ResultTypeDatabase), + string(ResultTypeMigration), + } +} + +func (s *ResultType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResultType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResultType(input string) (*ResultType, error) { + vals := map[string]ResultType{ + "collection": ResultTypeCollection, + "database": ResultTypeDatabase, + "migration": ResultTypeMigration, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResultType(input) + return &out, nil +} + +type ScenarioSource string + +const ( + ScenarioSourceAccess ScenarioSource = "Access" + ScenarioSourceDBTwo ScenarioSource = "DB2" + ScenarioSourceMongoDB ScenarioSource = "MongoDB" + ScenarioSourceMySQL ScenarioSource = "MySQL" + ScenarioSourceMySQLRDS ScenarioSource = "MySQLRDS" + ScenarioSourceOracle ScenarioSource = "Oracle" + ScenarioSourcePostgreSQL ScenarioSource = "PostgreSQL" + ScenarioSourcePostgreSQLRDS ScenarioSource = "PostgreSQLRDS" + ScenarioSourceSQL ScenarioSource = "SQL" + ScenarioSourceSQLRDS ScenarioSource = "SQLRDS" + ScenarioSourceSybase ScenarioSource = "Sybase" +) + +func PossibleValuesForScenarioSource() []string { + return []string{ + string(ScenarioSourceAccess), + string(ScenarioSourceDBTwo), + string(ScenarioSourceMongoDB), + string(ScenarioSourceMySQL), + string(ScenarioSourceMySQLRDS), + string(ScenarioSourceOracle), + string(ScenarioSourcePostgreSQL), + string(ScenarioSourcePostgreSQLRDS), + string(ScenarioSourceSQL), + 
string(ScenarioSourceSQLRDS), + string(ScenarioSourceSybase), + } +} + +func (s *ScenarioSource) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioSource(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioSource(input string) (*ScenarioSource, error) { + vals := map[string]ScenarioSource{ + "access": ScenarioSourceAccess, + "db2": ScenarioSourceDBTwo, + "mongodb": ScenarioSourceMongoDB, + "mysql": ScenarioSourceMySQL, + "mysqlrds": ScenarioSourceMySQLRDS, + "oracle": ScenarioSourceOracle, + "postgresql": ScenarioSourcePostgreSQL, + "postgresqlrds": ScenarioSourcePostgreSQLRDS, + "sql": ScenarioSourceSQL, + "sqlrds": ScenarioSourceSQLRDS, + "sybase": ScenarioSourceSybase, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioSource(input) + return &out, nil +} + +type ScenarioTarget string + +const ( + ScenarioTargetAzureDBForMySql ScenarioTarget = "AzureDBForMySql" + ScenarioTargetAzureDBForPostgresSQL ScenarioTarget = "AzureDBForPostgresSQL" + ScenarioTargetMongoDB ScenarioTarget = "MongoDB" + ScenarioTargetSQLDB ScenarioTarget = "SQLDB" + ScenarioTargetSQLDW ScenarioTarget = "SQLDW" + ScenarioTargetSQLMI ScenarioTarget = "SQLMI" + ScenarioTargetSQLServer ScenarioTarget = "SQLServer" +) + +func PossibleValuesForScenarioTarget() []string { + return []string{ + string(ScenarioTargetAzureDBForMySql), + string(ScenarioTargetAzureDBForPostgresSQL), + string(ScenarioTargetMongoDB), + string(ScenarioTargetSQLDB), + string(ScenarioTargetSQLDW), + string(ScenarioTargetSQLMI), + string(ScenarioTargetSQLServer), + } +} + +func (s *ScenarioTarget) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err 
!= nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioTarget(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioTarget(input string) (*ScenarioTarget, error) { + vals := map[string]ScenarioTarget{ + "azuredbformysql": ScenarioTargetAzureDBForMySql, + "azuredbforpostgressql": ScenarioTargetAzureDBForPostgresSQL, + "mongodb": ScenarioTargetMongoDB, + "sqldb": ScenarioTargetSQLDB, + "sqldw": ScenarioTargetSQLDW, + "sqlmi": ScenarioTargetSQLMI, + "sqlserver": ScenarioTargetSQLServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioTarget(input) + return &out, nil +} + +type ServerLevelPermissionsGroup string + +const ( + ServerLevelPermissionsGroupDefault ServerLevelPermissionsGroup = "Default" + ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL ServerLevelPermissionsGroup = "MigrationFromMySQLToAzureDBForMySQL" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureDB" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureMI" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureVM" +) + +func PossibleValuesForServerLevelPermissionsGroup() []string { + return []string{ + string(ServerLevelPermissionsGroupDefault), + string(ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM), + } +} + +func (s *ServerLevelPermissionsGroup) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, 
&decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServerLevelPermissionsGroup(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServerLevelPermissionsGroup(input string) (*ServerLevelPermissionsGroup, error) { + vals := map[string]ServerLevelPermissionsGroup{ + "default": ServerLevelPermissionsGroupDefault, + "migrationfrommysqltoazuredbformysql": ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL, + "migrationfromsqlservertoazuredb": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB, + "migrationfromsqlservertoazuremi": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI, + "migrationfromsqlservertoazurevm": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServerLevelPermissionsGroup(input) + return &out, nil +} + +type Severity string + +const ( + SeverityError Severity = "Error" + SeverityMessage Severity = "Message" + SeverityWarning Severity = "Warning" +) + +func PossibleValuesForSeverity() []string { + return []string{ + string(SeverityError), + string(SeverityMessage), + string(SeverityWarning), + } +} + +func (s *Severity) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSeverity(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSeverity(input string) (*Severity, error) { + vals := map[string]Severity{ + "error": SeverityError, + "message": SeverityMessage, + "warning": SeverityWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := 
Severity(input) + return &out, nil +} + +type SqlSourcePlatform string + +const ( + SqlSourcePlatformSqlOnPrem SqlSourcePlatform = "SqlOnPrem" +) + +func PossibleValuesForSqlSourcePlatform() []string { + return []string{ + string(SqlSourcePlatformSqlOnPrem), + } +} + +func (s *SqlSourcePlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSqlSourcePlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSqlSourcePlatform(input string) (*SqlSourcePlatform, error) { + vals := map[string]SqlSourcePlatform{ + "sqlonprem": SqlSourcePlatformSqlOnPrem, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SqlSourcePlatform(input) + return &out, nil +} + +type SsisMigrationOverwriteOption string + +const ( + SsisMigrationOverwriteOptionIgnore SsisMigrationOverwriteOption = "Ignore" + SsisMigrationOverwriteOptionOverwrite SsisMigrationOverwriteOption = "Overwrite" +) + +func PossibleValuesForSsisMigrationOverwriteOption() []string { + return []string{ + string(SsisMigrationOverwriteOptionIgnore), + string(SsisMigrationOverwriteOptionOverwrite), + } +} + +func (s *SsisMigrationOverwriteOption) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisMigrationOverwriteOption(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisMigrationOverwriteOption(input string) (*SsisMigrationOverwriteOption, error) { + vals := map[string]SsisMigrationOverwriteOption{ + "ignore": SsisMigrationOverwriteOptionIgnore, + "overwrite": SsisMigrationOverwriteOptionOverwrite, + } + if 
v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisMigrationOverwriteOption(input) + return &out, nil +} + +type SsisMigrationStage string + +const ( + SsisMigrationStageCompleted SsisMigrationStage = "Completed" + SsisMigrationStageInProgress SsisMigrationStage = "InProgress" + SsisMigrationStageInitialize SsisMigrationStage = "Initialize" + SsisMigrationStageNone SsisMigrationStage = "None" +) + +func PossibleValuesForSsisMigrationStage() []string { + return []string{ + string(SsisMigrationStageCompleted), + string(SsisMigrationStageInProgress), + string(SsisMigrationStageInitialize), + string(SsisMigrationStageNone), + } +} + +func (s *SsisMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisMigrationStage(input string) (*SsisMigrationStage, error) { + vals := map[string]SsisMigrationStage{ + "completed": SsisMigrationStageCompleted, + "inprogress": SsisMigrationStageInProgress, + "initialize": SsisMigrationStageInitialize, + "none": SsisMigrationStageNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisMigrationStage(input) + return &out, nil +} + +type SsisStoreType string + +const ( + SsisStoreTypeSsisCatalog SsisStoreType = "SsisCatalog" +) + +func PossibleValuesForSsisStoreType() []string { + return []string{ + string(SsisStoreTypeSsisCatalog), + } +} + +func (s *SsisStoreType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := 
parseSsisStoreType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisStoreType(input string) (*SsisStoreType, error) { + vals := map[string]SsisStoreType{ + "ssiscatalog": SsisStoreTypeSsisCatalog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisStoreType(input) + return &out, nil +} + +type SyncDatabaseMigrationReportingState string + +const ( + SyncDatabaseMigrationReportingStateBACKUPCOMPLETED SyncDatabaseMigrationReportingState = "BACKUP_COMPLETED" + SyncDatabaseMigrationReportingStateBACKUPINPROGRESS SyncDatabaseMigrationReportingState = "BACKUP_IN_PROGRESS" + SyncDatabaseMigrationReportingStateCANCELLED SyncDatabaseMigrationReportingState = "CANCELLED" + SyncDatabaseMigrationReportingStateCANCELLING SyncDatabaseMigrationReportingState = "CANCELLING" + SyncDatabaseMigrationReportingStateCOMPLETE SyncDatabaseMigrationReportingState = "COMPLETE" + SyncDatabaseMigrationReportingStateCOMPLETING SyncDatabaseMigrationReportingState = "COMPLETING" + SyncDatabaseMigrationReportingStateCONFIGURING SyncDatabaseMigrationReportingState = "CONFIGURING" + SyncDatabaseMigrationReportingStateFAILED SyncDatabaseMigrationReportingState = "FAILED" + SyncDatabaseMigrationReportingStateINITIALIAZING SyncDatabaseMigrationReportingState = "INITIALIAZING" + SyncDatabaseMigrationReportingStateREADYTOCOMPLETE SyncDatabaseMigrationReportingState = "READY_TO_COMPLETE" + SyncDatabaseMigrationReportingStateRESTORECOMPLETED SyncDatabaseMigrationReportingState = "RESTORE_COMPLETED" + SyncDatabaseMigrationReportingStateRESTOREINPROGRESS SyncDatabaseMigrationReportingState = "RESTORE_IN_PROGRESS" + SyncDatabaseMigrationReportingStateRUNNING SyncDatabaseMigrationReportingState = "RUNNING" + SyncDatabaseMigrationReportingStateSTARTING SyncDatabaseMigrationReportingState = "STARTING" + 
SyncDatabaseMigrationReportingStateUNDEFINED SyncDatabaseMigrationReportingState = "UNDEFINED" + SyncDatabaseMigrationReportingStateVALIDATING SyncDatabaseMigrationReportingState = "VALIDATING" + SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE SyncDatabaseMigrationReportingState = "VALIDATION_COMPLETE" + SyncDatabaseMigrationReportingStateVALIDATIONFAILED SyncDatabaseMigrationReportingState = "VALIDATION_FAILED" +) + +func PossibleValuesForSyncDatabaseMigrationReportingState() []string { + return []string{ + string(SyncDatabaseMigrationReportingStateBACKUPCOMPLETED), + string(SyncDatabaseMigrationReportingStateBACKUPINPROGRESS), + string(SyncDatabaseMigrationReportingStateCANCELLED), + string(SyncDatabaseMigrationReportingStateCANCELLING), + string(SyncDatabaseMigrationReportingStateCOMPLETE), + string(SyncDatabaseMigrationReportingStateCOMPLETING), + string(SyncDatabaseMigrationReportingStateCONFIGURING), + string(SyncDatabaseMigrationReportingStateFAILED), + string(SyncDatabaseMigrationReportingStateINITIALIAZING), + string(SyncDatabaseMigrationReportingStateREADYTOCOMPLETE), + string(SyncDatabaseMigrationReportingStateRESTORECOMPLETED), + string(SyncDatabaseMigrationReportingStateRESTOREINPROGRESS), + string(SyncDatabaseMigrationReportingStateRUNNING), + string(SyncDatabaseMigrationReportingStateSTARTING), + string(SyncDatabaseMigrationReportingStateUNDEFINED), + string(SyncDatabaseMigrationReportingStateVALIDATING), + string(SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE), + string(SyncDatabaseMigrationReportingStateVALIDATIONFAILED), + } +} + +func (s *SyncDatabaseMigrationReportingState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSyncDatabaseMigrationReportingState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func 
parseSyncDatabaseMigrationReportingState(input string) (*SyncDatabaseMigrationReportingState, error) { + vals := map[string]SyncDatabaseMigrationReportingState{ + "backup_completed": SyncDatabaseMigrationReportingStateBACKUPCOMPLETED, + "backup_in_progress": SyncDatabaseMigrationReportingStateBACKUPINPROGRESS, + "cancelled": SyncDatabaseMigrationReportingStateCANCELLED, + "cancelling": SyncDatabaseMigrationReportingStateCANCELLING, + "complete": SyncDatabaseMigrationReportingStateCOMPLETE, + "completing": SyncDatabaseMigrationReportingStateCOMPLETING, + "configuring": SyncDatabaseMigrationReportingStateCONFIGURING, + "failed": SyncDatabaseMigrationReportingStateFAILED, + "initialiazing": SyncDatabaseMigrationReportingStateINITIALIAZING, + "ready_to_complete": SyncDatabaseMigrationReportingStateREADYTOCOMPLETE, + "restore_completed": SyncDatabaseMigrationReportingStateRESTORECOMPLETED, + "restore_in_progress": SyncDatabaseMigrationReportingStateRESTOREINPROGRESS, + "running": SyncDatabaseMigrationReportingStateRUNNING, + "starting": SyncDatabaseMigrationReportingStateSTARTING, + "undefined": SyncDatabaseMigrationReportingStateUNDEFINED, + "validating": SyncDatabaseMigrationReportingStateVALIDATING, + "validation_complete": SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE, + "validation_failed": SyncDatabaseMigrationReportingStateVALIDATIONFAILED, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SyncDatabaseMigrationReportingState(input) + return &out, nil +} + +type SyncTableMigrationState string + +const ( + SyncTableMigrationStateBEFORELOAD SyncTableMigrationState = "BEFORE_LOAD" + SyncTableMigrationStateCANCELED SyncTableMigrationState = "CANCELED" + SyncTableMigrationStateCOMPLETED SyncTableMigrationState = "COMPLETED" + SyncTableMigrationStateERROR SyncTableMigrationState = "ERROR" + SyncTableMigrationStateFAILED SyncTableMigrationState = "FAILED" + 
SyncTableMigrationStateFULLLOAD SyncTableMigrationState = "FULL_LOAD" +) + +func PossibleValuesForSyncTableMigrationState() []string { + return []string{ + string(SyncTableMigrationStateBEFORELOAD), + string(SyncTableMigrationStateCANCELED), + string(SyncTableMigrationStateCOMPLETED), + string(SyncTableMigrationStateERROR), + string(SyncTableMigrationStateFAILED), + string(SyncTableMigrationStateFULLLOAD), + } +} + +func (s *SyncTableMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSyncTableMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSyncTableMigrationState(input string) (*SyncTableMigrationState, error) { + vals := map[string]SyncTableMigrationState{ + "before_load": SyncTableMigrationStateBEFORELOAD, + "canceled": SyncTableMigrationStateCANCELED, + "completed": SyncTableMigrationStateCOMPLETED, + "error": SyncTableMigrationStateERROR, + "failed": SyncTableMigrationStateFAILED, + "full_load": SyncTableMigrationStateFULLLOAD, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SyncTableMigrationState(input) + return &out, nil +} + +type TaskState string + +const ( + TaskStateCanceled TaskState = "Canceled" + TaskStateFailed TaskState = "Failed" + TaskStateFailedInputValidation TaskState = "FailedInputValidation" + TaskStateFaulted TaskState = "Faulted" + TaskStateQueued TaskState = "Queued" + TaskStateRunning TaskState = "Running" + TaskStateSucceeded TaskState = "Succeeded" + TaskStateUnknown TaskState = "Unknown" +) + +func PossibleValuesForTaskState() []string { + return []string{ + string(TaskStateCanceled), + string(TaskStateFailed), + string(TaskStateFailedInputValidation), + string(TaskStateFaulted), + 
string(TaskStateQueued), + string(TaskStateRunning), + string(TaskStateSucceeded), + string(TaskStateUnknown), + } +} + +func (s *TaskState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTaskState(input string) (*TaskState, error) { + vals := map[string]TaskState{ + "canceled": TaskStateCanceled, + "failed": TaskStateFailed, + "failedinputvalidation": TaskStateFailedInputValidation, + "faulted": TaskStateFaulted, + "queued": TaskStateQueued, + "running": TaskStateRunning, + "succeeded": TaskStateSucceeded, + "unknown": TaskStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskState(input) + return &out, nil +} + +type TaskType string + +const ( + TaskTypeConnectPointMongoDb TaskType = "Connect.MongoDb" + TaskTypeConnectToSourcePointMySql TaskType = "ConnectToSource.MySql" + TaskTypeConnectToSourcePointOraclePointSync TaskType = "ConnectToSource.Oracle.Sync" + TaskTypeConnectToSourcePointPostgreSqlPointSync TaskType = "ConnectToSource.PostgreSql.Sync" + TaskTypeConnectToSourcePointSqlServer TaskType = "ConnectToSource.SqlServer" + TaskTypeConnectToSourcePointSqlServerPointSync TaskType = "ConnectToSource.SqlServer.Sync" + TaskTypeConnectToTargetPointAzureDbForMySql TaskType = "ConnectToTarget.AzureDbForMySql" + TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointAzureSqlDbMI TaskType = "ConnectToTarget.AzureSqlDbMI" + TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS TaskType = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync 
TaskType = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointSqlDb TaskType = "ConnectToTarget.SqlDb" + TaskTypeConnectToTargetPointSqlDbPointSync TaskType = "ConnectToTarget.SqlDb.Sync" + TaskTypeGetTDECertificatesPointSql TaskType = "GetTDECertificates.Sql" + TaskTypeGetUserTablesMySql TaskType = "GetUserTablesMySql" + TaskTypeGetUserTablesOracle TaskType = "GetUserTablesOracle" + TaskTypeGetUserTablesPointAzureSqlDbPointSync TaskType = "GetUserTables.AzureSqlDb.Sync" + TaskTypeGetUserTablesPointSql TaskType = "GetUserTables.Sql" + TaskTypeGetUserTablesPostgreSql TaskType = "GetUserTablesPostgreSql" + TaskTypeMigratePointMongoDb TaskType = "Migrate.MongoDb" + TaskTypeMigratePointMySqlPointAzureDbForMySql TaskType = "Migrate.MySql.AzureDbForMySql" + TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync TaskType = "Migrate.MySql.AzureDbForMySql.Sync" + TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync TaskType = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo TaskType = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + TaskTypeMigratePointSqlServerPointAzureSqlDbMI TaskType = "Migrate.SqlServer.AzureSqlDbMI" + TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync TaskType = "Migrate.SqlServer.AzureSqlDb.Sync" + TaskTypeMigratePointSqlServerPointSqlDb TaskType = "Migrate.SqlServer.SqlDb" + TaskTypeMigratePointSsis TaskType = "Migrate.Ssis" + TaskTypeMigrateSchemaSqlServerSqlDb TaskType = "MigrateSchemaSqlServerSqlDb" + TaskTypeServicePointCheckPointOCI TaskType = "Service.Check.OCI" + TaskTypeServicePointInstallPointOCI TaskType = "Service.Install.OCI" + TaskTypeServicePointUploadPointOCI TaskType = "Service.Upload.OCI" + TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" 
+ TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync TaskType = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + TaskTypeValidatePointMongoDb TaskType = "Validate.MongoDb" + TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync TaskType = "Validate.Oracle.AzureDbPostgreSql.Sync" +) + +func PossibleValuesForTaskType() []string { + return []string{ + string(TaskTypeConnectPointMongoDb), + string(TaskTypeConnectToSourcePointMySql), + string(TaskTypeConnectToSourcePointOraclePointSync), + string(TaskTypeConnectToSourcePointPostgreSqlPointSync), + string(TaskTypeConnectToSourcePointSqlServer), + string(TaskTypeConnectToSourcePointSqlServerPointSync), + string(TaskTypeConnectToTargetPointAzureDbForMySql), + string(TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointAzureSqlDbMI), + string(TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointSqlDb), + string(TaskTypeConnectToTargetPointSqlDbPointSync), + string(TaskTypeGetTDECertificatesPointSql), + string(TaskTypeGetUserTablesMySql), + string(TaskTypeGetUserTablesOracle), + string(TaskTypeGetUserTablesPointAzureSqlDbPointSync), + string(TaskTypeGetUserTablesPointSql), + string(TaskTypeGetUserTablesPostgreSql), + string(TaskTypeMigratePointMongoDb), + string(TaskTypeMigratePointMySqlPointAzureDbForMySql), + string(TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync), + string(TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMI), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + 
string(TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync), + string(TaskTypeMigratePointSqlServerPointSqlDb), + string(TaskTypeMigratePointSsis), + string(TaskTypeMigrateSchemaSqlServerSqlDb), + string(TaskTypeServicePointCheckPointOCI), + string(TaskTypeServicePointInstallPointOCI), + string(TaskTypeServicePointUploadPointOCI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync), + string(TaskTypeValidatePointMongoDb), + string(TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync), + } +} + +func (s *TaskType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTaskType(input string) (*TaskType, error) { + vals := map[string]TaskType{ + "connect.mongodb": TaskTypeConnectPointMongoDb, + "connecttosource.mysql": TaskTypeConnectToSourcePointMySql, + "connecttosource.oracle.sync": TaskTypeConnectToSourcePointOraclePointSync, + "connecttosource.postgresql.sync": TaskTypeConnectToSourcePointPostgreSqlPointSync, + "connecttosource.sqlserver": TaskTypeConnectToSourcePointSqlServer, + "connecttosource.sqlserver.sync": TaskTypeConnectToSourcePointSqlServerPointSync, + "connecttotarget.azuredbformysql": TaskTypeConnectToTargetPointAzureDbForMySql, + "connecttotarget.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync, + "connecttotarget.azuresqldbmi": TaskTypeConnectToTargetPointAzureSqlDbMI, + "connecttotarget.azuresqldbmi.sync.lrs": TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS, + "connecttotarget.oracle.azuredbforpostgresql.sync": 
TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync, + "connecttotarget.sqldb": TaskTypeConnectToTargetPointSqlDb, + "connecttotarget.sqldb.sync": TaskTypeConnectToTargetPointSqlDbPointSync, + "gettdecertificates.sql": TaskTypeGetTDECertificatesPointSql, + "getusertablesmysql": TaskTypeGetUserTablesMySql, + "getusertablesoracle": TaskTypeGetUserTablesOracle, + "getusertables.azuresqldb.sync": TaskTypeGetUserTablesPointAzureSqlDbPointSync, + "getusertables.sql": TaskTypeGetUserTablesPointSql, + "getusertablespostgresql": TaskTypeGetUserTablesPostgreSql, + "migrate.mongodb": TaskTypeMigratePointMongoDb, + "migrate.mysql.azuredbformysql": TaskTypeMigratePointMySqlPointAzureDbForMySql, + "migrate.mysql.azuredbformysql.sync": TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync, + "migrate.oracle.azuredbforpostgresql.sync": TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync, + "migrate.postgresql.azuredbforpostgresql.syncv2": TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo, + "migrate.sqlserver.azuresqldbmi": TaskTypeMigratePointSqlServerPointAzureSqlDbMI, + "migrate.sqlserver.azuresqldbmi.sync.lrs": TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "migrate.sqlserver.azuresqldb.sync": TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync, + "migrate.sqlserver.sqldb": TaskTypeMigratePointSqlServerPointSqlDb, + "migrate.ssis": TaskTypeMigratePointSsis, + "migrateschemasqlserversqldb": TaskTypeMigrateSchemaSqlServerSqlDb, + "service.check.oci": TaskTypeServicePointCheckPointOCI, + "service.install.oci": TaskTypeServicePointInstallPointOCI, + "service.upload.oci": TaskTypeServicePointUploadPointOCI, + "validatemigrationinput.sqlserver.azuresqldbmi": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI, + "validatemigrationinput.sqlserver.azuresqldbmi.sync.lrs": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "validatemigrationinput.sqlserver.sqldb.sync": 
TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync, + "validate.mongodb": TaskTypeValidatePointMongoDb, + "validate.oracle.azuredbpostgresql.sync": TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskType(input) + return &out, nil +} + +type UpdateActionType string + +const ( + UpdateActionTypeAddedOnTarget UpdateActionType = "AddedOnTarget" + UpdateActionTypeChangedOnTarget UpdateActionType = "ChangedOnTarget" + UpdateActionTypeDeletedOnTarget UpdateActionType = "DeletedOnTarget" +) + +func PossibleValuesForUpdateActionType() []string { + return []string{ + string(UpdateActionTypeAddedOnTarget), + string(UpdateActionTypeChangedOnTarget), + string(UpdateActionTypeDeletedOnTarget), + } +} + +func (s *UpdateActionType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseUpdateActionType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseUpdateActionType(input string) (*UpdateActionType, error) { + vals := map[string]UpdateActionType{ + "addedontarget": UpdateActionTypeAddedOnTarget, + "changedontarget": UpdateActionTypeChangedOnTarget, + "deletedontarget": UpdateActionTypeDeletedOnTarget, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UpdateActionType(input) + return &out, nil +} + +type ValidationStatus string + +const ( + ValidationStatusCompleted ValidationStatus = "Completed" + ValidationStatusCompletedWithIssues ValidationStatus = "CompletedWithIssues" + ValidationStatusDefault ValidationStatus = "Default" + ValidationStatusFailed ValidationStatus = "Failed" + 
ValidationStatusInProgress ValidationStatus = "InProgress" + ValidationStatusInitialized ValidationStatus = "Initialized" + ValidationStatusNotStarted ValidationStatus = "NotStarted" + ValidationStatusStopped ValidationStatus = "Stopped" +) + +func PossibleValuesForValidationStatus() []string { + return []string{ + string(ValidationStatusCompleted), + string(ValidationStatusCompletedWithIssues), + string(ValidationStatusDefault), + string(ValidationStatusFailed), + string(ValidationStatusInProgress), + string(ValidationStatusInitialized), + string(ValidationStatusNotStarted), + string(ValidationStatusStopped), + } +} + +func (s *ValidationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseValidationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseValidationStatus(input string) (*ValidationStatus, error) { + vals := map[string]ValidationStatus{ + "completed": ValidationStatusCompleted, + "completedwithissues": ValidationStatusCompletedWithIssues, + "default": ValidationStatusDefault, + "failed": ValidationStatusFailed, + "inprogress": ValidationStatusInProgress, + "initialized": ValidationStatusInitialized, + "notstarted": ValidationStatusNotStarted, + "stopped": ValidationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ValidationStatus(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/id_service.go b/resource-manager/datamigration/2025-06-30/customoperation/id_service.go new file mode 100644 index 00000000000..cb1fbd94832 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/id_service.go @@ -0,0 +1,130 @@ +package customoperation + +import ( + "fmt" + 
"strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&ServiceId{}) +} + +var _ resourceids.ResourceId = &ServiceId{} + +// ServiceId is a struct representing the Resource ID for a Service +type ServiceId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string +} + +// NewServiceID returns a new ServiceId struct +func NewServiceID(subscriptionId string, resourceGroupName string, serviceName string) ServiceId { + return ServiceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + } +} + +// ParseServiceID parses 'input' into a ServiceId +func ParseServiceID(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceIDInsensitively parses 'input' case-insensitively into a ServiceId +// note: this method should only be used for API response data and not user input +func ParseServiceIDInsensitively(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + 
return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + return nil +} + +// ValidateServiceID checks that 'input' can be parsed as a Service ID +func ValidateServiceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service ID +func (id ServiceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service ID +func (id ServiceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + } +} + +// String returns a human-readable description of this Service ID +func (id 
ServiceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + } + return fmt.Sprintf("Service (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/id_service_test.go b/resource-manager/datamigration/2025-06-30/customoperation/id_service_test.go new file mode 100644 index 00000000000..e9db6b476c4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/id_service_test.go @@ -0,0 +1,282 @@ +package customoperation + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &ServiceId{} + +func TestNewServiceID(t *testing.T) { + id := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } +} + +func TestFormatServiceID(t *testing.T) { + actual := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but 
got %q", expected, actual) + } +} + +func TestParseServiceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error 
but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestParseServiceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + 
Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestSegmentsForServiceId(t *testing.T) { + segments := ServiceId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/id_servicetask.go b/resource-manager/datamigration/2025-06-30/customoperation/id_servicetask.go new file mode 100644 index 00000000000..e21ff7d7161 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/id_servicetask.go @@ -0,0 +1,139 @@ +package customoperation + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&ServiceTaskId{}) +} + +var _ resourceids.ResourceId = &ServiceTaskId{} + +// ServiceTaskId is a struct representing the Resource ID for a Service Task +type ServiceTaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ServiceTaskName string +} + +// NewServiceTaskID returns a new ServiceTaskId struct +func NewServiceTaskID(subscriptionId string, resourceGroupName string, serviceName string, serviceTaskName string) ServiceTaskId { + return ServiceTaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ServiceTaskName: serviceTaskName, + } +} + +// ParseServiceTaskID parses 'input' into a ServiceTaskId +func ParseServiceTaskID(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceTaskIDInsensitively parses 'input' case-insensitively into a ServiceTaskId +// note: this method should only be used for API response data and not user input +func ParseServiceTaskIDInsensitively(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceTaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = 
input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ServiceTaskName, ok = input.Parsed["serviceTaskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceTaskName", input) + } + + return nil +} + +// ValidateServiceTaskID checks that 'input' can be parsed as a Service Task ID +func ValidateServiceTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service Task ID +func (id ServiceTaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/serviceTasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ServiceTaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service Task ID +func (id ServiceTaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", 
"Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticServiceTasks", "serviceTasks", "serviceTasks"), + resourceids.UserSpecifiedSegment("serviceTaskName", "serviceTaskName"), + } +} + +// String returns a human-readable description of this Service Task ID +func (id ServiceTaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Service Task Name: %q", id.ServiceTaskName), + } + return fmt.Sprintf("Service Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/id_servicetask_test.go b/resource-manager/datamigration/2025-06-30/customoperation/id_servicetask_test.go new file mode 100644 index 00000000000..0cc6f0465be --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/id_servicetask_test.go @@ -0,0 +1,327 @@ +package customoperation + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &ServiceTaskId{} + +func TestNewServiceTaskID(t *testing.T) { + id := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ServiceTaskName != "serviceTaskName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceTaskName'", id.ServiceTaskName, "serviceTaskName") + } +} + +func TestFormatServiceTaskID(t *testing.T) { + actual := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseServiceTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // 
Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", 
v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestParseServiceTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI 
(mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ServiceTaskName: "sErViCeTaSkNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", 
v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestSegmentsForServiceTaskId(t *testing.T) { + segments := ServiceTaskId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceTaskId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/id_task.go b/resource-manager/datamigration/2025-06-30/customoperation/id_task.go new file mode 100644 index 00000000000..d95d276a9d8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/id_task.go @@ -0,0 +1,148 @@ +package customoperation + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&TaskId{}) +} + +var _ resourceids.ResourceId = &TaskId{} + +// TaskId is a struct representing the Resource ID for a Task +type TaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + TaskName string +} + +// NewTaskID returns a new TaskId struct +func NewTaskID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, taskName string) TaskId { + return TaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + TaskName: taskName, + } +} + +// ParseTaskID parses 'input' into a TaskId +func ParseTaskID(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseTaskIDInsensitively parses 'input' case-insensitively into a TaskId +// note: this method should only be used for API response data and not user input +func ParseTaskIDInsensitively(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *TaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if 
id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.TaskName, ok = input.Parsed["taskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "taskName", input) + } + + return nil +} + +// ValidateTaskID checks that 'input' can be parsed as a Task ID +func ValidateTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Task ID +func (id TaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/tasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.TaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Task ID +func (id TaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + 
resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticTasks", "tasks", "tasks"), + resourceids.UserSpecifiedSegment("taskName", "taskName"), + } +} + +// String returns a human-readable description of this Task ID +func (id TaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("Task Name: %q", id.TaskName), + } + return fmt.Sprintf("Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/id_task_test.go b/resource-manager/datamigration/2025-06-30/customoperation/id_task_test.go new file mode 100644 index 00000000000..fe497f84497 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/id_task_test.go @@ -0,0 +1,372 @@ +package customoperation + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &TaskId{} + +func TestNewTaskID(t *testing.T) { + id := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + + // NOTE(review): the expected literal must be the first Fatalf argument and the actual + // value second, so the failure message reads correctly ("Expected <want> but got <got>"). + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", "12345678-1234-9876-4563-123456789012", id.SubscriptionId) + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", "resourceGroupName", id.ResourceGroupName) + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", "serviceName", id.ServiceName) + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", "projectName", id.ProjectName) + } + + if id.TaskName != "taskName" { + t.Fatalf("Expected %q but got %q for Segment 'TaskName'", "taskName", id.TaskName) + } +} + +func TestFormatTaskID(t *testing.T) { + actual := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.TaskName != v.Expected.TaskName { + t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName) + } + + } +} + +func TestParseTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + TaskName: "tAsKnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.TaskName != v.Expected.TaskName { + t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName) + } + + } +} + +func TestSegmentsForTaskId(t *testing.T) { + segments := TaskId{}.Segments() + if len(segments) == 0 { + t.Fatalf("TaskId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/method_servicescheckchildrennameavailability.go b/resource-manager/datamigration/2025-06-30/customoperation/method_servicescheckchildrennameavailability.go new file mode 100644 index 00000000000..728683fe0a2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/method_servicescheckchildrennameavailability.go @@ -0,0 +1,58 @@ +package customoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesCheckChildrenNameAvailabilityOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *NameAvailabilityResponse +} + +// ServicesCheckChildrenNameAvailability ... 
+func (c CustomOperationClient) ServicesCheckChildrenNameAvailability(ctx context.Context, id ServiceId, input NameAvailabilityRequest) (result ServicesCheckChildrenNameAvailabilityOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/checkNameAvailability", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model NameAvailabilityResponse + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/method_servicescheckstatus.go b/resource-manager/datamigration/2025-06-30/customoperation/method_servicescheckstatus.go new file mode 100644 index 00000000000..c829bc827ab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/method_servicescheckstatus.go @@ -0,0 +1,54 @@ +package customoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesCheckStatusOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DataMigrationServiceStatusResponse +} + +// ServicesCheckStatus ... 
+func (c CustomOperationClient) ServicesCheckStatus(ctx context.Context, id ServiceId) (result ServicesCheckStatusOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/checkStatus", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DataMigrationServiceStatusResponse + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/method_servicesstart.go b/resource-manager/datamigration/2025-06-30/customoperation/method_servicesstart.go new file mode 100644 index 00000000000..c1ab2fd9e59 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/method_servicesstart.go @@ -0,0 +1,70 @@ +package customoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesStartOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// ServicesStart ... 
+func (c CustomOperationClient) ServicesStart(ctx context.Context, id ServiceId) (result ServicesStartOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/start", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesStartThenPoll performs ServicesStart then polls until it's completed +func (c CustomOperationClient) ServicesStartThenPoll(ctx context.Context, id ServiceId) error { + result, err := c.ServicesStart(ctx, id) + if err != nil { + return fmt.Errorf("performing ServicesStart: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesStart: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/method_servicesstop.go b/resource-manager/datamigration/2025-06-30/customoperation/method_servicesstop.go new file mode 100644 index 00000000000..f1820f1d0fa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/method_servicesstop.go @@ -0,0 +1,70 @@ +package customoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServicesStopOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// ServicesStop ... +func (c CustomOperationClient) ServicesStop(ctx context.Context, id ServiceId) (result ServicesStopOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/stop", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesStopThenPoll performs ServicesStop then polls until it's completed +func (c CustomOperationClient) ServicesStopThenPoll(ctx context.Context, id ServiceId) error { + result, err := c.ServicesStop(ctx, id) + if err != nil { + return fmt.Errorf("performing ServicesStop: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesStop: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/method_servicetaskscancel.go b/resource-manager/datamigration/2025-06-30/customoperation/method_servicetaskscancel.go new file mode 100644 index 00000000000..37dcf7c1615 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/method_servicetaskscancel.go @@ -0,0 +1,54 @@ +package customoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ServiceTasksCancelOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// ServiceTasksCancel ... +func (c CustomOperationClient) ServiceTasksCancel(ctx context.Context, id ServiceTaskId) (result ServiceTasksCancelOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/cancel", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/method_taskscancel.go b/resource-manager/datamigration/2025-06-30/customoperation/method_taskscancel.go new file mode 100644 index 00000000000..679b12ac906 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/method_taskscancel.go @@ -0,0 +1,54 @@ +package customoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksCancelOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// TasksCancel ... 
+func (c CustomOperationClient) TasksCancel(ctx context.Context, id TaskId) (result TasksCancelOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/cancel", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/method_taskscommand.go b/resource-manager/datamigration/2025-06-30/customoperation/method_taskscommand.go new file mode 100644 index 00000000000..5dad19081f7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/method_taskscommand.go @@ -0,0 +1,63 @@ +package customoperation + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksCommandOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model CommandProperties +} + +// TasksCommand ... 
+func (c CustomOperationClient) TasksCommand(ctx context.Context, id TaskId, input CommandProperties) (result TasksCommandOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/command", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var respObj json.RawMessage + if err = resp.Unmarshal(&respObj); err != nil { + return + } + model, err := UnmarshalCommandPropertiesImplementation(respObj) + if err != nil { + return + } + result.Model = model + + return +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_azureactivedirectoryapp.go b/resource-manager/datamigration/2025-06-30/customoperation/model_azureactivedirectoryapp.go new file mode 100644 index 00000000000..703b6ac3aa5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_azureactivedirectoryapp.go @@ -0,0 +1,11 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AzureActiveDirectoryApp struct { + AppKey *string `json:"appKey,omitempty"` + ApplicationId *string `json:"applicationId,omitempty"` + IgnoreAzurePermissions *bool `json:"ignoreAzurePermissions,omitempty"` + TenantId *string `json:"tenantId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_backupfileinfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_backupfileinfo.go new file mode 100644 index 00000000000..23ce82e40c7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_backupfileinfo.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupFileInfo struct { + FamilySequenceNumber *int64 `json:"familySequenceNumber,omitempty"` + FileLocation *string `json:"fileLocation,omitempty"` + Status *BackupFileStatus `json:"status,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_backupsetinfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_backupsetinfo.go new file mode 100644 index 00000000000..8c975bc61dd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_backupsetinfo.go @@ -0,0 +1,59 @@ +package customoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BackupSetInfo struct { + BackupFinishedDate *string `json:"backupFinishedDate,omitempty"` + BackupSetId *string `json:"backupSetId,omitempty"` + BackupStartDate *string `json:"backupStartDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FirstLsn *string `json:"firstLsn,omitempty"` + IsBackupRestored *bool `json:"isBackupRestored,omitempty"` + LastLsn *string `json:"lastLsn,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + ListOfBackupFiles *[]BackupFileInfo `json:"listOfBackupFiles,omitempty"` +} + +func (o *BackupSetInfo) GetBackupFinishedDateAsTime() (*time.Time, error) { + if o.BackupFinishedDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishedDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupFinishedDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishedDate = &formatted +} + +func (o *BackupSetInfo) GetBackupStartDateAsTime() (*time.Time, error) { + if o.BackupStartDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupStartDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupStartDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupStartDate = &formatted +} + +func (o *BackupSetInfo) GetLastModifiedTimeAsTime() (*time.Time, error) { + if o.LastModifiedTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastModifiedTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetLastModifiedTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModifiedTime = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_blobshare.go b/resource-manager/datamigration/2025-06-30/customoperation/model_blobshare.go new file mode 100644 index 00000000000..da419473145 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/customoperation/model_blobshare.go @@ -0,0 +1,8 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BlobShare struct { + SasUri *string `json:"sasUri,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_commandproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_commandproperties.go new file mode 100644 index 00000000000..3ab2fbf32a3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_commandproperties.go @@ -0,0 +1,85 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CommandProperties interface { + CommandProperties() BaseCommandPropertiesImpl +} + +var _ CommandProperties = BaseCommandPropertiesImpl{} + +type BaseCommandPropertiesImpl struct { + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s BaseCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s +} + +var _ CommandProperties = RawCommandPropertiesImpl{} + +// RawCommandPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawCommandPropertiesImpl struct { + commandProperties BaseCommandPropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s.commandProperties +} + +func UnmarshalCommandPropertiesImplementation(input []byte) (CommandProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling CommandProperties into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["commandType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureDbSqlMi.Complete") { + var out MigrateMISyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMISyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Sync.Complete.Database") { + var out MigrateSyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + var parent BaseCommandPropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseCommandPropertiesImpl: %+v", err) + } + + return RawCommandPropertiesImpl{ + commandProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connectioninfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connectioninfo.go new file mode 100644 index 00000000000..b900b18932b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connectioninfo.go @@ -0,0 +1,117 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectionInfo interface { + ConnectionInfo() BaseConnectionInfoImpl +} + +var _ ConnectionInfo = BaseConnectionInfoImpl{} + +type BaseConnectionInfoImpl struct { + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s BaseConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s +} + +var _ ConnectionInfo = RawConnectionInfoImpl{} + +// RawConnectionInfoImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectionInfoImpl struct { + connectionInfo BaseConnectionInfoImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s.connectionInfo +} + +func UnmarshalConnectionInfoImplementation(input []byte) (ConnectionInfo, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectionInfo into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["type"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MiSqlConnectionInfo") { + var out MiSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MiSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "mongoDbConnectionInfo") { + var out MongoDbConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbConnectionInfo: %+v", err) + } + 
return out, nil + } + + if strings.EqualFold(value, "MySqlConnectionInfo") { + var out MySqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MySqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "OracleConnectionInfo") { + var out OracleConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into OracleConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "PostgreSqlConnectionInfo") { + var out PostgreSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into PostgreSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SqlConnectionInfo") { + var out SqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into SqlConnectionInfo: %+v", err) + } + return out, nil + } + + var parent BaseConnectionInfoImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectionInfoImpl: %+v", err) + } + + return RawConnectionInfoImpl{ + connectionInfo: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttomongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttomongodbtaskproperties.go new file mode 100644 index 00000000000..e837fb78f10 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttomongodbtaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToMongoDbTaskProperties{} + +type ConnectToMongoDbTaskProperties struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToMongoDbTaskProperties{} + +func (s ConnectToMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Connect.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToMongoDbTaskProperties{} + +func (s *ConnectToMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType 
`json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcemysqltaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcemysqltaskinput.go new file mode 100644 index 00000000000..940c6010616 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcemysqltaskinput.go @@ -0,0 +1,11 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceMySqlTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetPlatform *MySqlTargetPlatformType `json:"targetPlatform,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcemysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcemysqltaskproperties.go new file mode 100644 index 00000000000..e59c3959100 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcemysqltaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceMySqlTaskProperties{} + +type ConnectToSourceMySqlTaskProperties struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceMySqlTaskProperties{} + +func (s ConnectToSourceMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper 
ConnectToSourceMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.MySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceMySqlTaskProperties{} + +func (s *ConnectToSourceMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return 
fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcenonsqltaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcenonsqltaskoutput.go new file mode 100644 index 00000000000..e25b7500a93 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcenonsqltaskoutput.go @@ -0,0 +1,12 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceNonSqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerProperties *ServerProperties `json:"serverProperties,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourceoraclesynctaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourceoraclesynctaskinput.go new file mode 100644 index 00000000000..216b7d0c713 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourceoraclesynctaskinput.go @@ -0,0 +1,8 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceOracleSyncTaskInput struct { + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourceoraclesynctaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourceoraclesynctaskoutput.go new file mode 100644 index 00000000000..b74745c7cc7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourceoraclesynctaskoutput.go @@ -0,0 +1,11 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceOracleSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourceoraclesynctaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourceoraclesynctaskproperties.go new file mode 100644 index 00000000000..9b00fcfe5c9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourceoraclesynctaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToSourceOracleSyncTaskProperties{} + +type ConnectToSourceOracleSyncTaskProperties struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceOracleSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceOracleSyncTaskProperties{} + +func (s ConnectToSourceOracleSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceOracleSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.Oracle.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceOracleSyncTaskProperties{} + +func (s *ConnectToSourceOracleSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + 
ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceOracleSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcepostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcepostgresqlsynctaskinput.go new file mode 100644 index 00000000000..b09344d38a8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcepostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourcePostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcepostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcepostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..bf76dd8acd7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcepostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourcePostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcepostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcepostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..2c74fa5ac78 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcepostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToSourcePostgreSqlSyncTaskProperties{} + +type ConnectToSourcePostgreSqlSyncTaskProperties struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourcePostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.PostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s *ConnectToSourcePostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output 
*[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourcePostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlserversynctaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlserversynctaskproperties.go new file mode 100644 index 00000000000..5ac01c4a510 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlserversynctaskproperties.go @@ -0,0 +1,121 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerSyncTaskProperties{} + +type ConnectToSourceSqlServerSyncTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerSyncTaskProperties{} + +func (s ConnectToSourceSqlServerSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.SqlServer.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerSyncTaskProperties{} + +func (s *ConnectToSourceSqlServerSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput 
`json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskinput.go 
b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskinput.go new file mode 100644 index 00000000000..7d9360be7a7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskinput.go @@ -0,0 +1,15 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceSqlServerTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectDatabases *bool `json:"collectDatabases,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + CollectTdeCertificateInfo *bool `json:"collectTdeCertificateInfo,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutput.go new file mode 100644 index 00000000000..ac87ef0e5a1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutput.go @@ -0,0 +1,100 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceSqlServerTaskOutput interface { + ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl +} + +var _ ConnectToSourceSqlServerTaskOutput = BaseConnectToSourceSqlServerTaskOutputImpl{} + +type BaseConnectToSourceSqlServerTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s +} + +var _ ConnectToSourceSqlServerTaskOutput = RawConnectToSourceSqlServerTaskOutputImpl{} + +// RawConnectToSourceSqlServerTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectToSourceSqlServerTaskOutputImpl struct { + connectToSourceSqlServerTaskOutput BaseConnectToSourceSqlServerTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s.connectToSourceSqlServerTaskOutput +} + +func UnmarshalConnectToSourceSqlServerTaskOutputImplementation(input []byte) (ConnectToSourceSqlServerTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TaskLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputTaskLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + return out, nil + } + + var parent BaseConnectToSourceSqlServerTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectToSourceSqlServerTaskOutputImpl: %+v", err) + } + + return RawConnectToSourceSqlServerTaskOutputImpl{ + connectToSourceSqlServerTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutputagentjoblevel.go new file mode 100644 index 00000000000..1dbebad60ef --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +type ConnectToSourceSqlServerTaskOutputAgentJobLevel struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + JobCategory *string `json:"jobCategory,omitempty"` + JobOwner *string `json:"jobOwner,omitempty"` + LastExecutedOn *string `json:"lastExecutedOn,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutputdatabaselevel.go new file mode 100644 index 00000000000..2dace278b19 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutputdatabaselevel.go @@ -0,0 +1,56 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +type ConnectToSourceSqlServerTaskOutputDatabaseLevel struct { + CompatibilityLevel *DatabaseCompatLevel `json:"compatibilityLevel,omitempty"` + DatabaseFiles *[]DatabaseFileInfo `json:"databaseFiles,omitempty"` + DatabaseState *DatabaseState `json:"databaseState,omitempty"` + Name *string `json:"name,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", 
err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutputloginlevel.go new file mode 100644 index 00000000000..2760eb441b2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutputloginlevel.go @@ -0,0 +1,56 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +type ConnectToSourceSqlServerTaskOutputLoginLevel struct { + DefaultDatabase *string `json:"defaultDatabase,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + LoginType *LoginType `json:"loginType,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputLoginLevel + wrapped := wrapper(s) + encoded, 
err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutputtasklevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutputtasklevel.go new file mode 100644 index 00000000000..488b2f54ac6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskoutputtasklevel.go @@ -0,0 +1,58 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +type ConnectToSourceSqlServerTaskOutputTaskLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + DatabaseTdeCertificateMapping *map[string]string `json:"databaseTdeCertificateMapping,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputTaskLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + decoded["resultType"] = "TaskLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskproperties.go 
b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskproperties.go new file mode 100644 index 00000000000..0cc07bc748e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttosourcesqlservertaskproperties.go @@ -0,0 +1,124 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerTaskProperties{} + +type ConnectToSourceSqlServerTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskProperties{} + +func (s ConnectToSourceSqlServerTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + 
decoded["taskType"] = "ConnectToSource.SqlServer" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerTaskProperties{} + +func (s *ConnectToSourceSqlServerTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output 
:= make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbformysqltaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbformysqltaskinput.go new file mode 100644 index 00000000000..b1a5740c98f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbformysqltaskinput.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForMySqlTaskInput struct { + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbformysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbformysqltaskoutput.go new file mode 100644 index 00000000000..d590f457e2b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbformysqltaskoutput.go @@ -0,0 +1,12 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForMySqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbformysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbformysqltaskproperties.go new file mode 100644 index 00000000000..bdcd634e6f9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbformysqltaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetAzureDbForMySqlTaskProperties{} + +type ConnectToTargetAzureDbForMySqlTaskProperties struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s *ConnectToTargetAzureDbForMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + 
Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..eff973fddc3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForPostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..974637512a8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..1cfa33089cf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..c2704c435f7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,8 
// ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput is the input payload for the
// "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" task.
type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput struct {
	// TargetConnectionInfo describes the target PostgreSQL server connection.
	// Required on the wire: the JSON tag has no omitempty.
	TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"`
}

// ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput is one output entry returned
// by the task. All fields are optional pointers, mirroring the service response shape.
type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput struct {
	// DatabaseSchemaMap pairs database names with schema lists (see the Inlined type).
	DatabaseSchemaMap *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined `json:"databaseSchemaMap,omitempty"`
	// Databases lists database names reported for the target server.
	Databases *[]string `json:"databases,omitempty"`
	// TargetServerBrandVersion is the target server's brand/version string.
	TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"`
	// TargetServerVersion is the target server's version string.
	TargetServerVersion *string `json:"targetServerVersion,omitempty"`
	// ValidationErrors holds any errors reported while validating connectivity.
	ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"`
}
// ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined is a
// single database-to-schemas pairing within the task output's databaseSchemaMap list.
type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined struct {
	// Database is the database name.
	Database *string `json:"database,omitempty"`
	// Schemas lists the schema names associated with that database.
	Schemas *[]string `json:"schemas,omitempty"`
}
// Compile-time check that this type implements the ProjectTaskProperties discriminated interface.
var _ ProjectTaskProperties = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{}

// ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties is the ProjectTaskProperties
// implementation whose discriminator value is "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync"
// (forced in MarshalJSON below).
type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties struct {
	// Input is the task's input payload; Output holds the service-produced results.
	Input  *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput    `json:"input,omitempty"`
	Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base-type view of this task's properties.
func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{}

// MarshalJSON serializes the properties and forces the taskType discriminator.
func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The local wrapper type sheds this MarshalJSON method, avoiding infinite recursion.
	type wrapper ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	// Round-trip through a map so the discriminator can be injected unconditionally.
	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	decoded["taskType"] = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{}
+func (s *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetsqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_connecttotargetsqldbtaskinput.go new file mode 100644 index 00000000000..e360cf81479 --- /dev/null +++ 
// ConnectToTargetSqlDbTaskInput is the input payload for the "ConnectToTarget.SqlDb" task.
type ConnectToTargetSqlDbTaskInput struct {
	// QueryObjectCounts controls whether object counts are queried (optional).
	QueryObjectCounts *bool `json:"queryObjectCounts,omitempty"`
	// TargetConnectionInfo describes the target SQL server connection.
	// Required on the wire: the JSON tag has no omitempty.
	TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"`
}

// ConnectToTargetSqlDbTaskOutput is one output entry returned by the task.
// All fields are optional pointers, mirroring the service response shape.
type ConnectToTargetSqlDbTaskOutput struct {
	// Databases maps database names to string values (per-database info from the service).
	Databases *map[string]string `json:"databases,omitempty"`
	// Id is the result identifier.
	Id *string `json:"id,omitempty"`
	// TargetServerBrandVersion is the target server's brand/version string.
	TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"`
	// TargetServerVersion is the target server's version string.
	TargetServerVersion *string `json:"targetServerVersion,omitempty"`
}
// Compile-time check that this type implements the ProjectTaskProperties discriminated interface.
var _ ProjectTaskProperties = ConnectToTargetSqlDbTaskProperties{}

// ConnectToTargetSqlDbTaskProperties is the ProjectTaskProperties implementation whose
// discriminator value is "ConnectToTarget.SqlDb" (forced in MarshalJSON below).
type ConnectToTargetSqlDbTaskProperties struct {
	// CreatedOn is a string timestamp; Input/Output are the task payload and results.
	CreatedOn *string                           `json:"createdOn,omitempty"`
	Input     *ConnectToTargetSqlDbTaskInput    `json:"input,omitempty"`
	Output    *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base-type view of this task's properties.
func (s ConnectToTargetSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToTargetSqlDbTaskProperties{}

// MarshalJSON serializes the properties and forces the taskType discriminator.
func (s ConnectToTargetSqlDbTaskProperties) MarshalJSON() ([]byte, error) {
	// The local wrapper type sheds this MarshalJSON method, avoiding infinite recursion.
	type wrapper ConnectToTargetSqlDbTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToTargetSqlDbTaskProperties: %+v", err)
	}

	// Round-trip through a map so the discriminator can be injected unconditionally.
	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties: %+v", err)
	}

	decoded["taskType"] = "ConnectToTarget.SqlDb"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlDbTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToTargetSqlDbTaskProperties{}

// UnmarshalJSON decodes the plainly-typed fields first, then resolves the
// polymorphic "commands" list element-by-element via the implementation dispatcher.
func (s *ConnectToTargetSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		CreatedOn  *string                           `json:"createdOn,omitempty"`
		Input      *ConnectToTargetSqlDbTaskInput    `json:"input,omitempty"`
		Output     *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                `json:"clientData,omitempty"`
		Errors     *[]ODataError                     `json:"errors,omitempty"`
		State      *TaskState                        `json:"state,omitempty"`
		TaskType   TaskType                          `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.CreatedOn = decoded.CreatedOn
	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass over the raw JSON for the polymorphic commands field.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlDbTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
// ConnectToTargetSqlMISyncTaskInput is the input payload for the
// "ConnectToTarget.AzureSqlDbMI.Sync.LRS" task. Both fields are required on the
// wire: neither JSON tag carries omitempty.
type ConnectToTargetSqlMISyncTaskInput struct {
	// AzureApp identifies the Azure Active Directory application used for the connection.
	AzureApp AzureActiveDirectoryApp `json:"azureApp"`
	// TargetConnectionInfo describes the target managed-instance connection.
	TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"`
}

// ConnectToTargetSqlMISyncTaskOutput is one output entry returned by the task.
// All fields are optional pointers, mirroring the service response shape.
type ConnectToTargetSqlMISyncTaskOutput struct {
	// TargetServerBrandVersion is the target server's brand/version string.
	TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"`
	// TargetServerVersion is the target server's version string.
	TargetServerVersion *string `json:"targetServerVersion,omitempty"`
	// ValidationErrors holds any errors reported while validating connectivity.
	ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"`
}
// Compile-time check that this type implements the ProjectTaskProperties discriminated interface.
var _ ProjectTaskProperties = ConnectToTargetSqlMISyncTaskProperties{}

// ConnectToTargetSqlMISyncTaskProperties is the ProjectTaskProperties implementation whose
// discriminator value is "ConnectToTarget.AzureSqlDbMI.Sync.LRS" (forced in MarshalJSON below).
type ConnectToTargetSqlMISyncTaskProperties struct {
	// Input is the task's input payload; Output holds the service-produced results.
	Input  *ConnectToTargetSqlMISyncTaskInput    `json:"input,omitempty"`
	Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base-type view of this task's properties.
func (s ConnectToTargetSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToTargetSqlMISyncTaskProperties{}

// MarshalJSON serializes the properties and forces the taskType discriminator.
func (s ConnectToTargetSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The local wrapper type sheds this MarshalJSON method, avoiding infinite recursion.
	type wrapper ConnectToTargetSqlMISyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err)
	}

	// Round-trip through a map so the discriminator can be injected unconditionally.
	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err)
	}

	decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI.Sync.LRS"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToTargetSqlMISyncTaskProperties{}

// UnmarshalJSON decodes the plainly-typed fields first, then resolves the
// polymorphic "commands" list element-by-element via the implementation dispatcher.
func (s *ConnectToTargetSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *ConnectToTargetSqlMISyncTaskInput    `json:"input,omitempty"`
		Output     *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                    `json:"clientData,omitempty"`
		Errors     *[]ODataError                         `json:"errors,omitempty"`
		State      *TaskState                            `json:"state,omitempty"`
		TaskType   TaskType                              `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass over the raw JSON for the polymorphic commands field.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMISyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
// ConnectToTargetSqlMITaskInput is the input payload for the "ConnectToTarget.AzureSqlDbMI" task.
type ConnectToTargetSqlMITaskInput struct {
	// CollectAgentJobs controls whether agent jobs are collected (optional).
	CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"`
	// CollectLogins controls whether logins are collected (optional).
	CollectLogins *bool `json:"collectLogins,omitempty"`
	// TargetConnectionInfo describes the target SQL server connection.
	// Required on the wire: the JSON tag has no omitempty.
	TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"`
	// ValidateSsisCatalogOnly restricts validation to the SSIS catalog when set (optional).
	ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"`
}

// ConnectToTargetSqlMITaskOutput is one output entry returned by the task.
// All fields are optional pointers, mirroring the service response shape.
type ConnectToTargetSqlMITaskOutput struct {
	// AgentJobs lists collected agent job names.
	AgentJobs *[]string `json:"agentJobs,omitempty"`
	// Id is the result identifier.
	Id *string `json:"id,omitempty"`
	// Logins lists collected login names.
	Logins *[]string `json:"logins,omitempty"`
	// TargetServerBrandVersion is the target server's brand/version string.
	TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"`
	// TargetServerVersion is the target server's version string.
	TargetServerVersion *string `json:"targetServerVersion,omitempty"`
	// ValidationErrors holds any errors reported while validating connectivity.
	ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"`
}
// Compile-time check that this type implements the ProjectTaskProperties discriminated interface.
var _ ProjectTaskProperties = ConnectToTargetSqlMITaskProperties{}

// ConnectToTargetSqlMITaskProperties is the ProjectTaskProperties implementation whose
// discriminator value is "ConnectToTarget.AzureSqlDbMI" (forced in MarshalJSON below).
type ConnectToTargetSqlMITaskProperties struct {
	// Input is the task's input payload; Output holds the service-produced results.
	Input  *ConnectToTargetSqlMITaskInput    `json:"input,omitempty"`
	Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base-type view of this task's properties.
func (s ConnectToTargetSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToTargetSqlMITaskProperties{}

// MarshalJSON serializes the properties and forces the taskType discriminator.
func (s ConnectToTargetSqlMITaskProperties) MarshalJSON() ([]byte, error) {
	// The local wrapper type sheds this MarshalJSON method, avoiding infinite recursion.
	type wrapper ConnectToTargetSqlMITaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToTargetSqlMITaskProperties: %+v", err)
	}

	// Round-trip through a map so the discriminator can be injected unconditionally.
	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties: %+v", err)
	}

	decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMITaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToTargetSqlMITaskProperties{}

// UnmarshalJSON decodes the plainly-typed fields first, then resolves the
// polymorphic "commands" list element-by-element via the implementation dispatcher.
func (s *ConnectToTargetSqlMITaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *ConnectToTargetSqlMITaskInput    `json:"input,omitempty"`
		Output     *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                `json:"clientData,omitempty"`
		Errors     *[]ODataError                     `json:"errors,omitempty"`
		State      *TaskState                        `json:"state,omitempty"`
		TaskType   TaskType                          `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass over the raw JSON for the polymorphic commands field.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMITaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
// ConnectToTargetSqlSqlDbSyncTaskInput is the input payload for the
// "ConnectToTarget.SqlDb.Sync" task. Both connections are required on the wire:
// neither JSON tag carries omitempty.
type ConnectToTargetSqlSqlDbSyncTaskInput struct {
	// SourceConnectionInfo describes the source SQL server connection.
	SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"`
	// TargetConnectionInfo describes the target SQL server connection.
	TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"`
}

// Compile-time check that this type implements the ProjectTaskProperties discriminated interface.
var _ ProjectTaskProperties = ConnectToTargetSqlSqlDbSyncTaskProperties{}

// ConnectToTargetSqlSqlDbSyncTaskProperties is the ProjectTaskProperties implementation
// whose discriminator value is "ConnectToTarget.SqlDb.Sync" (forced in MarshalJSON below).
// Note: its Output reuses ConnectToTargetSqlDbTaskOutput rather than a dedicated type.
type ConnectToTargetSqlSqlDbSyncTaskProperties struct {
	// Input is the task's input payload; Output holds the service-produced results.
	Input  *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"`
	Output *[]ConnectToTargetSqlDbTaskOutput     `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base-type view of this task's properties.
func (s ConnectToTargetSqlSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToTargetSqlSqlDbSyncTaskProperties{}

// MarshalJSON serializes the properties and forces the taskType discriminator.
func (s ConnectToTargetSqlSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The local wrapper type sheds this MarshalJSON method, avoiding infinite recursion.
	type wrapper ConnectToTargetSqlSqlDbSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err)
	}

	// Round-trip through a map so the discriminator can be injected unconditionally.
	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err)
	}

	decoded["taskType"] = "ConnectToTarget.SqlDb.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToTargetSqlSqlDbSyncTaskProperties{}

// UnmarshalJSON decodes the plainly-typed fields first, then resolves the
// polymorphic "commands" list element-by-element via the implementation dispatcher.
func (s *ConnectToTargetSqlSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"`
		Output     *[]ConnectToTargetSqlDbTaskOutput     `json:"output,omitempty"`
		ClientData *map[string]string                    `json:"clientData,omitempty"`
		Errors     *[]ODataError                         `json:"errors,omitempty"`
		State      *TaskState                            `json:"state,omitempty"`
		TaskType   TaskType                              `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass over the raw JSON for the polymorphic commands field.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlSqlDbSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}

// DatabaseBackupInfo describes a database backup set: its files, type, owning
// database and position/family metadata. All fields are optional pointers.
type DatabaseBackupInfo struct {
	// BackupFiles lists the backup file paths.
	BackupFiles *[]string `json:"backupFiles,omitempty"`
	// BackupFinishDate is an RFC 3339 timestamp string; use the helpers below for time.Time.
	BackupFinishDate *string `json:"backupFinishDate,omitempty"`
	// BackupType is the kind of backup (see the BackupType enum).
	BackupType *BackupType `json:"backupType,omitempty"`
	// DatabaseName is the backed-up database's name.
	DatabaseName *string `json:"databaseName,omitempty"`
	// FamilyCount is the number of backup families.
	FamilyCount *int64 `json:"familyCount,omitempty"`
	// IsCompressed reports whether the backup is compressed.
	IsCompressed *bool `json:"isCompressed,omitempty"`
	// IsDamaged reports whether the backup is damaged.
	IsDamaged *bool `json:"isDamaged,omitempty"`
	// Position is the backup's position within the set.
	Position *int64 `json:"position,omitempty"`
}

// GetBackupFinishDateAsTime parses BackupFinishDate as an RFC 3339 timestamp,
// returning (nil, nil) when the field is unset.
func (o *DatabaseBackupInfo) GetBackupFinishDateAsTime() (*time.Time, error) {
	if o.BackupFinishDate == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.BackupFinishDate, "2006-01-02T15:04:05Z07:00")
}

// SetBackupFinishDateAsTime stores input as an RFC 3339 string in BackupFinishDate.
func (o *DatabaseBackupInfo) SetBackupFinishDateAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.BackupFinishDate = &formatted
}
// DatabaseFileInfo describes one physical/logical file belonging to a database
// being migrated. All fields are optional pointers, mirroring the wire format.
type DatabaseFileInfo struct {
	// DatabaseName is the owning database's name.
	DatabaseName *string `json:"databaseName,omitempty"`
	// FileType is the file's kind (see the DatabaseFileType enum).
	FileType *DatabaseFileType `json:"fileType,omitempty"`
	// Id is the file entry's identifier.
	Id *string `json:"id,omitempty"`
	// LogicalName is the file's logical name.
	LogicalName *string `json:"logicalName,omitempty"`
	// PhysicalFullName is the file's full physical path.
	PhysicalFullName *string `json:"physicalFullName,omitempty"`
	// RestoreFullName is the full path the file is restored to.
	RestoreFullName *string `json:"restoreFullName,omitempty"`
	// SizeMB is the file size in megabytes.
	SizeMB *float64 `json:"sizeMB,omitempty"`
}
+ +type DatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_databasetable.go b/resource-manager/datamigration/2025-06-30/customoperation/model_databasetable.go new file mode 100644 index 00000000000..d197e51de64 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_databasetable.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseTable struct { + HasRows *bool `json:"hasRows,omitempty"` + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_dataintegrityvalidationresult.go b/resource-manager/datamigration/2025-06-30/customoperation/model_dataintegrityvalidationresult.go new file mode 100644 index 00000000000..317cd596af1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_dataintegrityvalidationresult.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataIntegrityValidationResult struct { + FailedObjects *map[string]string `json:"failedObjects,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_dataitemmigrationsummaryresult.go b/resource-manager/datamigration/2025-06-30/customoperation/model_dataitemmigrationsummaryresult.go new file mode 100644 index 00000000000..c0dc75ca00d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_dataitemmigrationsummaryresult.go @@ -0,0 +1,46 @@ +package customoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataItemMigrationSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DataItemMigrationSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DataItemMigrationSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_datamigrationservicestatusresponse.go b/resource-manager/datamigration/2025-06-30/customoperation/model_datamigrationservicestatusresponse.go new file mode 100644 index 00000000000..86f7069fd77 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_datamigrationservicestatusresponse.go @@ -0,0 +1,12 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataMigrationServiceStatusResponse struct { + AgentConfiguration *interface{} `json:"agentConfiguration,omitempty"` + AgentVersion *string `json:"agentVersion,omitempty"` + Status *string `json:"status,omitempty"` + SupportedTaskTypes *[]string `json:"supportedTaskTypes,omitempty"` + VMSize *string `json:"vmSize,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_executionstatistics.go b/resource-manager/datamigration/2025-06-30/customoperation/model_executionstatistics.go new file mode 100644 index 00000000000..cabaf9d20f9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_executionstatistics.go @@ -0,0 +1,13 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ExecutionStatistics struct { + CpuTimeMs *float64 `json:"cpuTimeMs,omitempty"` + ElapsedTimeMs *float64 `json:"elapsedTimeMs,omitempty"` + ExecutionCount *int64 `json:"executionCount,omitempty"` + HasErrors *bool `json:"hasErrors,omitempty"` + SqlErrors *[]string `json:"sqlErrors,omitempty"` + WaitStats *map[string]WaitStatistics `json:"waitStats,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_fileshare.go b/resource-manager/datamigration/2025-06-30/customoperation/model_fileshare.go new file mode 100644 index 00000000000..8fa6d624f18 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_fileshare.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type FileShare struct { + Password *string `json:"password,omitempty"` + Path string `json:"path"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_gettdecertificatessqltaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_gettdecertificatessqltaskinput.go new file mode 100644 index 00000000000..44c83e7c60f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_gettdecertificatessqltaskinput.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetTdeCertificatesSqlTaskInput struct { + BackupFileShare FileShare `json:"backupFileShare"` + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + SelectedCertificates []SelectedCertificateInput `json:"selectedCertificates"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_gettdecertificatessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_gettdecertificatessqltaskoutput.go new file mode 100644 index 00000000000..61917899ec6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_gettdecertificatessqltaskoutput.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetTdeCertificatesSqlTaskOutput struct { + Base64EncodedCertificates *map[string][]string `json:"base64EncodedCertificates,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_gettdecertificatessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_gettdecertificatessqltaskproperties.go new file mode 100644 index 00000000000..fa816ffead1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_gettdecertificatessqltaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = GetTdeCertificatesSqlTaskProperties{} + +type GetTdeCertificatesSqlTaskProperties struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetTdeCertificatesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetTdeCertificatesSqlTaskProperties{} + +func (s GetTdeCertificatesSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetTdeCertificatesSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetTDECertificates.Sql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetTdeCertificatesSqlTaskProperties{} + +func (s *GetTdeCertificatesSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetTdeCertificatesSqlTaskProperties': %+v", i, err) + } + output = 
append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesmysqltaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesmysqltaskinput.go new file mode 100644 index 00000000000..c6a1ab00878 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesmysqltaskinput.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesMySqlTaskInput struct { + ConnectionInfo MySqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesmysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesmysqltaskoutput.go new file mode 100644 index 00000000000..d77f995f128 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesmysqltaskoutput.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesMySqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesmysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesmysqltaskproperties.go new file mode 100644 index 00000000000..2259050aa4d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesmysqltaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = GetUserTablesMySqlTaskProperties{} + +type GetUserTablesMySqlTaskProperties struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesMySqlTaskProperties{} + +func (s GetUserTablesMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesMySqlTaskProperties{} + +func (s *GetUserTablesMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + 
s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesoracletaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesoracletaskinput.go new file mode 100644 index 00000000000..32debca660f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesoracletaskinput.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesOracleTaskInput struct { + ConnectionInfo OracleConnectionInfo `json:"connectionInfo"` + SelectedSchemas []string `json:"selectedSchemas"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesoracletaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesoracletaskoutput.go new file mode 100644 index 00000000000..9c02acb2a27 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesoracletaskoutput.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesOracleTaskOutput struct { + SchemaName *string `json:"schemaName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesoracletaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesoracletaskproperties.go new file mode 100644 index 00000000000..01ec509a4fa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablesoracletaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = GetUserTablesOracleTaskProperties{} + +type GetUserTablesOracleTaskProperties struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesOracleTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesOracleTaskProperties{} + +func (s GetUserTablesOracleTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesOracleTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling 
GetUserTablesOracleTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesOracle" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesOracleTaskProperties{} + +func (s *GetUserTablesOracleTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesOracleTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = 
&output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablespostgresqltaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablespostgresqltaskinput.go new file mode 100644 index 00000000000..2605236c3ea --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablespostgresqltaskinput.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesPostgreSqlTaskInput struct { + ConnectionInfo PostgreSqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablespostgresqltaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablespostgresqltaskoutput.go new file mode 100644 index 00000000000..81a97eb5b8f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablespostgresqltaskoutput.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesPostgreSqlTaskOutput struct { + DatabaseName *string `json:"databaseName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablespostgresqltaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablespostgresqltaskproperties.go new file mode 100644 index 00000000000..7d063477369 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablespostgresqltaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = GetUserTablesPostgreSqlTaskProperties{} + +type GetUserTablesPostgreSqlTaskProperties struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesPostgreSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesPostgreSqlTaskProperties{} + +func (s GetUserTablesPostgreSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesPostgreSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if 
err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesPostgreSql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesPostgreSqlTaskProperties{} + +func (s *GetUserTablesPostgreSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 
'GetUserTablesPostgreSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqlsynctaskinput.go new file mode 100644 index 00000000000..2b090e555d4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqlsynctaskinput.go @@ -0,0 +1,11 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlSyncTaskInput struct { + SelectedSourceDatabases []string `json:"selectedSourceDatabases"` + SelectedTargetDatabases []string `json:"selectedTargetDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqlsynctaskoutput.go new file mode 100644 index 00000000000..09cfdf8c6b9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqlsynctaskoutput.go @@ -0,0 +1,11 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlSyncTaskOutput struct { + DatabasesToSourceTables *map[string][]DatabaseTable `json:"databasesToSourceTables,omitempty"` + DatabasesToTargetTables *map[string][]DatabaseTable `json:"databasesToTargetTables,omitempty"` + TableValidationErrors *map[string][]string `json:"tableValidationErrors,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqlsynctaskproperties.go new file mode 100644 index 00000000000..89e0628a2b0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlSyncTaskProperties{} + +type GetUserTablesSqlSyncTaskProperties struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlSyncTaskProperties{} + +func (s GetUserTablesSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlSyncTaskProperties{} + +func (s *GetUserTablesSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqltaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqltaskinput.go new file mode 100644 index 00000000000..55c117b1f45 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqltaskinput.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlTaskInput struct { + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqltaskoutput.go new file mode 100644 index 00000000000..6998d59bbc1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqltaskoutput.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqltaskproperties.go new file mode 100644 index 00000000000..6be328b964b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_getusertablessqltaskproperties.go @@ -0,0 +1,109 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlTaskProperties{} + +type GetUserTablesSqlTaskProperties struct { + Input *GetUserTablesSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlTaskProperties{} + +func (s GetUserTablesSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.Sql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlTaskProperties{} + +func (s *GetUserTablesSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + 
Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemisynccompletecommandinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemisynccompletecommandinput.go new file mode 100644 index 00000000000..a2a1373b2c3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemisynccompletecommandinput.go @@ -0,0 +1,8 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMISyncCompleteCommandInput struct { + SourceDatabaseName string `json:"sourceDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemisynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemisynccompletecommandoutput.go new file mode 100644 index 00000000000..6352a70332a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemisynccompletecommandoutput.go @@ -0,0 +1,8 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMISyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemisynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemisynccompletecommandproperties.go new file mode 100644 index 00000000000..387c74d1186 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemisynccompletecommandproperties.go @@ -0,0 +1,55 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ CommandProperties = MigrateMISyncCompleteCommandProperties{} + +type MigrateMISyncCompleteCommandProperties struct { + Input *MigrateMISyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateMISyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateMISyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateMISyncCompleteCommandProperties{} + +func (s MigrateMISyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMISyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.SqlServer.AzureDbSqlMi.Complete" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemongodbtaskproperties.go new file mode 100644 index 00000000000..574168d2f3a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemongodbtaskproperties.go @@ -0,0 +1,121 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMongoDbTaskProperties{} + +type MigrateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMongoDbTaskProperties{} + +func (s MigrateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMongoDbTaskProperties{} + +func (s *MigrateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState 
`json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MongoDbProgress, 0) + for i, val := range listTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinedatabaseinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinedatabaseinput.go new file mode 100644 index 00000000000..9bbcdf2b023 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinedatabaseinput.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlOfflineDatabaseInput struct { + Name *string `json:"name,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskinput.go new file mode 100644 index 00000000000..3f26200b69d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskinput.go @@ -0,0 +1,32 @@ +package customoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + MakeSourceServerReadOnly *bool `json:"makeSourceServerReadOnly,omitempty"` + OptionalAgentSettings *map[string]string `json:"optionalAgentSettings,omitempty"` + SelectedDatabases []MigrateMySqlAzureDbForMySqlOfflineDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutput.go new file mode 100644 index 00000000000..924017aaf44 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutput.go @@ -0,0 +1,100 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskOutput interface { + MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlOfflineTaskOutput BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlOfflineTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlOfflineTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + 
if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + migrateMySqlAzureDbForMySqlOfflineTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go new file mode 100644 index 00000000000..883a82d3fa3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + var decoded 
map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go new file mode 100644 index 00000000000..6e4eb8bb832 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go @@ -0,0 +1,52 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go new file mode 100644 index 00000000000..f66a29ab8e7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ 
+package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +func (s 
MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go new file mode 100644 index 00000000000..2e5518abab1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go @@ -0,0 +1,61 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+
+var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} // compile-time interface assertion
+
+type MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel struct { // variant of MigrateMySqlAzureDbForMySqlOfflineTaskOutput for resultType "TableLevelOutput"
+	EndedOn             *string         `json:"endedOn,omitempty"`
+	ErrorPrefix         *string         `json:"errorPrefix,omitempty"`
+	ItemsCompletedCount *int64          `json:"itemsCompletedCount,omitempty"`
+	ItemsCount          *int64          `json:"itemsCount,omitempty"`
+	LastStorageUpdate   *string         `json:"lastStorageUpdate,omitempty"`
+	ObjectName          *string         `json:"objectName,omitempty"`
+	ResultPrefix        *string         `json:"resultPrefix,omitempty"`
+	StartedOn           *string         `json:"startedOn,omitempty"`
+	State               *MigrationState `json:"state,omitempty"`
+	StatusMessage       *string         `json:"statusMessage,omitempty"`
+
+	// Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput
+
+	Id         *string `json:"id,omitempty"`
+	ResultType string  `json:"resultType"`
+}
+
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { // projects the shared (base) fields
+	return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{
+		Id:         s.Id,
+		ResultType: s.ResultType,
+	}
+}
+
+var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{}
+
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MarshalJSON() ([]byte, error) { // forces the constant "resultType" discriminator into the encoded object
+	type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel // defined type drops the method set, preventing MarshalJSON recursion
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err)
+	}
+
+	decoded["resultType"] = "TableLevelOutput"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskproperties.go
new file mode 100644
index 00000000000..c61d9ec8282
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlofflinetaskproperties.go
@@ -0,0 +1,127 @@
+package customoperation
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} // compile-time interface assertion
+
+type MigrateMySqlAzureDbForMySqlOfflineTaskProperties struct { // ProjectTaskProperties variant for taskType "Migrate.MySql.AzureDbForMySql"
+	Input       *MigrateMySqlAzureDbForMySqlOfflineTaskInput    `json:"input,omitempty"`
+	IsCloneable *bool                                           `json:"isCloneable,omitempty"`
+	Output      *[]MigrateMySqlAzureDbForMySqlOfflineTaskOutput `json:"output,omitempty"`
+	TaskId      *string                                         `json:"taskId,omitempty"`
+
+	// Fields inherited from ProjectTaskProperties
+
+	ClientData *map[string]string   `json:"clientData,omitempty"`
+	Commands   *[]CommandProperties `json:"commands,omitempty"`
+	Errors     *[]ODataError        `json:"errors,omitempty"`
+	State      *TaskState           `json:"state,omitempty"`
+	TaskType   TaskType             `json:"taskType"`
+}
+
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { // projects the shared (base) fields
+	return BaseProjectTaskPropertiesImpl{
+		ClientData: s.ClientData,
+		Commands:   s.Commands,
+		Errors:     s.Errors,
+		State:      s.State,
+		TaskType:   s.TaskType,
+	}
+}
+
+var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{}
+
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) MarshalJSON() ([]byte, error) { // forces the constant "taskType" discriminator into the encoded object
+	type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskProperties // defined type drops the method set, preventing MarshalJSON recursion
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err)
+	}
+
+	decoded["taskType"] = "Migrate.MySql.AzureDbForMySql"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
+
+var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlOfflineTaskProperties{}
+
+func (s *MigrateMySqlAzureDbForMySqlOfflineTaskProperties) UnmarshalJSON(bytes []byte) error { // decodes plain fields directly, then resolves polymorphic "commands"/"output" elements by discriminator
+	var decoded struct {
+		Input       *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"`
+		IsCloneable *bool                                        `json:"isCloneable,omitempty"`
+		TaskId      *string                                      `json:"taskId,omitempty"`
+		ClientData  *map[string]string                           `json:"clientData,omitempty"`
+		Errors      *[]ODataError                                `json:"errors,omitempty"`
+		State       *TaskState                                   `json:"state,omitempty"`
+		TaskType    TaskType                                     `json:"taskType"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.Input = decoded.Input
+	s.IsCloneable = decoded.IsCloneable
+	s.TaskId = decoded.TaskId
+	s.ClientData = decoded.ClientData
+	s.Errors = decoded.Errors
+	s.State = decoded.State
+	s.TaskType = decoded.TaskType
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["commands"]; ok { // each element's concrete type depends on its own discriminator
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]CommandProperties, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalCommandPropertiesImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Commands = &output
+	}
+
+	if v, ok := temp["output"]; ok { // each element's concrete type depends on its "resultType"
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]MigrateMySqlAzureDbForMySqlOfflineTaskOutput, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Output = &output
+	}
+
+	return nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsyncdatabaseinput.go
new file mode 100644
index 00000000000..c8811c3eb31
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsyncdatabaseinput.go
@@ -0,0 +1,13 @@
+package customoperation
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type MigrateMySqlAzureDbForMySqlSyncDatabaseInput struct { // one database's selection for the sync task; the maps are opaque string settings passed through as-is
+	MigrationSetting   *map[string]string `json:"migrationSetting,omitempty"`
+	Name               *string            `json:"name,omitempty"`
+	SourceSetting      *map[string]string `json:"sourceSetting,omitempty"`
+	TableMap           *map[string]string `json:"tableMap,omitempty"`
+	TargetDatabaseName *string            `json:"targetDatabaseName,omitempty"`
+	TargetSetting      *map[string]string `json:"targetSetting,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskinput.go
new file mode 100644
index 00000000000..974cde64937
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskinput.go
@@ -0,0 +1,10 @@
+package customoperation
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type MigrateMySqlAzureDbForMySqlSyncTaskInput struct { // required (non-pointer) connection info plus the databases selected for migration
+	SelectedDatabases    []MigrateMySqlAzureDbForMySqlSyncDatabaseInput `json:"selectedDatabases"`
+	SourceConnectionInfo MySqlConnectionInfo                            `json:"sourceConnectionInfo"`
+	TargetConnectionInfo MySqlConnectionInfo                            `json:"targetConnectionInfo"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutput.go
new file mode 100644
index 00000000000..d7346685189
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutput.go
@@ -0,0 +1,108 @@
+package customoperation
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
See NOTICE.txt in the project root for license information.
+
+type MigrateMySqlAzureDbForMySqlSyncTaskOutput interface { // discriminated union; the concrete variant is selected by the JSON "resultType" field
+	MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl
+}
+
+var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} // compile-time interface assertion
+
+type BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { // fields shared by every resultType variant
+	Id         *string `json:"id,omitempty"`
+	ResultType string  `json:"resultType"`
+}
+
+func (s BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl {
+	return s
+}
+
+var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{}
+
+// RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types
+// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
+// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
+type RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct {
+	migrateMySqlAzureDbForMySqlSyncTaskOutput BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl // parsed base fields (unexported; exposed via the interface method)
+	Type                                      string
+	Values                                    map[string]interface{}
+}
+
+func (s RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl {
+	return s.migrateMySqlAzureDbForMySqlSyncTaskOutput
+}
+
+func UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlSyncTaskOutput, error) { // dispatches on "resultType" (case-insensitive); unknown values fall back to the Raw…Impl wrapper
+	if input == nil {
+		return nil, nil
+	}
+
+	var temp map[string]interface{}
+	if err := json.Unmarshal(input, &temp); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutput into map[string]interface: %+v", err)
+	}
+
+	var value string
+	if v, ok := temp["resultType"]; ok {
+		value = fmt.Sprintf("%v", v)
+	}
+
+	if strings.EqualFold(value, "DatabaseLevelErrorOutput") {
+		var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err)
+		}
+		return out, nil
+	}
+
+	if strings.EqualFold(value, "DatabaseLevelOutput") {
+		var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err)
+		}
+		return out, nil
+	}
+
+	if strings.EqualFold(value, "ErrorOutput") {
+		var out MigrateMySqlAzureDbForMySqlSyncTaskOutputError
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err)
+		}
+		return out, nil
+	}
+
+	if strings.EqualFold(value, "MigrationLevelOutput") {
+		var out MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err)
+		}
+		return out, nil
+	}
+
+	if strings.EqualFold(value, "TableLevelOutput") {
+		var out MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err)
+		}
+		return out, nil
+	}
+
+	var parent BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl
+	if err := json.Unmarshal(input, &parent); err != nil {
+		return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl: %+v", err)
+	}
+
+	return RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{
+		migrateMySqlAzureDbForMySqlSyncTaskOutput: parent,
+		Type:   value,
+		Values: temp,
+	}, nil
+
+}
diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go
new file mode 100644
index 00000000000..903c652abf5
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go
@@ -0,0 +1,53 @@
+package customoperation
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} // compile-time interface assertion
+
+type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError struct { // variant of MigrateMySqlAzureDbForMySqlSyncTaskOutput for resultType "DatabaseLevelErrorOutput"
+	ErrorMessage *string                            `json:"errorMessage,omitempty"`
+	Events       *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"`
+
+	// Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput
+
+	Id         *string `json:"id,omitempty"`
+	ResultType string  `json:"resultType"`
+}
+
+func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { // projects the shared (base) fields
+	return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{
+		Id:         s.Id,
+		ResultType: s.ResultType,
+	}
+}
+
+var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{}
+
+func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { // forces the constant "resultType" discriminator into the encoded object
+	type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError // defined type drops the method set, preventing MarshalJSON recursion
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err)
+	}
+
+	decoded["resultType"] = "DatabaseLevelErrorOutput"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go
new file mode 100644
index 00000000000..e3461ba50de
--- /dev/null
+++
b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go
@@ -0,0 +1,66 @@
+package customoperation
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} // compile-time interface assertion
+
+type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel struct { // variant of MigrateMySqlAzureDbForMySqlSyncTaskOutput for resultType "DatabaseLevelOutput"
+	AppliedChanges          *int64                               `json:"appliedChanges,omitempty"`
+	CdcDeleteCounter        *int64                               `json:"cdcDeleteCounter,omitempty"`
+	CdcInsertCounter        *int64                               `json:"cdcInsertCounter,omitempty"`
+	CdcUpdateCounter        *int64                               `json:"cdcUpdateCounter,omitempty"`
+	DatabaseName            *string                              `json:"databaseName,omitempty"`
+	EndedOn                 *string                              `json:"endedOn,omitempty"`
+	FullLoadCompletedTables *int64                               `json:"fullLoadCompletedTables,omitempty"`
+	FullLoadErroredTables   *int64                               `json:"fullLoadErroredTables,omitempty"`
+	FullLoadLoadingTables   *int64                               `json:"fullLoadLoadingTables,omitempty"`
+	FullLoadQueuedTables    *int64                               `json:"fullLoadQueuedTables,omitempty"`
+	IncomingChanges         *int64                               `json:"incomingChanges,omitempty"`
+	InitializationCompleted *bool                                `json:"initializationCompleted,omitempty"`
+	Latency                 *int64                               `json:"latency,omitempty"`
+	MigrationState          *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"`
+	StartedOn               *string                              `json:"startedOn,omitempty"`
+
+	// Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput
+
+	Id         *string `json:"id,omitempty"`
+	ResultType string  `json:"resultType"`
+}
+
+func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { // projects the shared (base) fields
+	return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{
+		Id:         s.Id,
+		ResultType: s.ResultType,
+	}
+}
+
+var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{}
+
+func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { // forces the constant "resultType" discriminator into the encoded object
+	type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel // defined type drops the method set, preventing MarshalJSON recursion
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err)
+	}
+
+	decoded["resultType"] = "DatabaseLevelOutput"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputerror.go
new file mode 100644
index 00000000000..b19fdc6d0af
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputerror.go
@@ -0,0 +1,52 @@
+package customoperation
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} // compile-time interface assertion
+
+type MigrateMySqlAzureDbForMySqlSyncTaskOutputError struct { // variant of MigrateMySqlAzureDbForMySqlSyncTaskOutput for resultType "ErrorOutput"
+	Error *ReportableException `json:"error,omitempty"`
+
+	// Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput
+
+	Id         *string `json:"id,omitempty"`
+	ResultType string  `json:"resultType"`
+}
+
+func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { // projects the shared (base) fields
+	return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{
+		Id:         s.Id,
+		ResultType: s.ResultType,
+	}
+}
+
+var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{}
+
+func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { // forces the constant "resultType" discriminator into the encoded object
+	type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputError // defined type drops the method set, preventing MarshalJSON recursion
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err)
+	}
+
+	decoded["resultType"] = "ErrorOutput"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go
new file mode 100644
index 00000000000..20c3e6c4f6f
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go
@@ -0,0 +1,57 @@
+package customoperation
+
+import (
+
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: 
%+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..5bcffba46c3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *string `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *string `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *string `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() 
BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskproperties.go new file mode 100644 index 00000000000..564c42d7212 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratemysqlazuredbformysqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+
+var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} // compile-time interface assertion
+
+type MigrateMySqlAzureDbForMySqlSyncTaskProperties struct { // ProjectTaskProperties variant for taskType "Migrate.MySql.AzureDbForMySql.Sync"
+	Input  *MigrateMySqlAzureDbForMySqlSyncTaskInput    `json:"input,omitempty"`
+	Output *[]MigrateMySqlAzureDbForMySqlSyncTaskOutput `json:"output,omitempty"`
+
+	// Fields inherited from ProjectTaskProperties
+
+	ClientData *map[string]string   `json:"clientData,omitempty"`
+	Commands   *[]CommandProperties `json:"commands,omitempty"`
+	Errors     *[]ODataError        `json:"errors,omitempty"`
+	State      *TaskState           `json:"state,omitempty"`
+	TaskType   TaskType             `json:"taskType"`
+}
+
+func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { // projects the shared (base) fields
+	return BaseProjectTaskPropertiesImpl{
+		ClientData: s.ClientData,
+		Commands:   s.Commands,
+		Errors:     s.Errors,
+		State:      s.State,
+		TaskType:   s.TaskType,
+	}
+}
+
+var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskProperties{}
+
+func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) MarshalJSON() ([]byte, error) { // forces the constant "taskType" discriminator into the encoded object
+	type wrapper MigrateMySqlAzureDbForMySqlSyncTaskProperties // defined type drops the method set, preventing MarshalJSON recursion
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err)
+	}
+
+	decoded["taskType"] = "Migrate.MySql.AzureDbForMySql.Sync"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
+
+var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlSyncTaskProperties{}
+
+func (s *MigrateMySqlAzureDbForMySqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { // decodes plain fields directly, then resolves polymorphic "commands"/"output" elements by discriminator
+	var decoded struct {
+		Input      *MigrateMySqlAzureDbForMySqlSyncTaskInput `json:"input,omitempty"`
+		ClientData *map[string]string                        `json:"clientData,omitempty"`
+		Errors     *[]ODataError                             `json:"errors,omitempty"`
+		State      *TaskState                                `json:"state,omitempty"`
+		TaskType   TaskType                                  `json:"taskType"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.Input = decoded.Input
+	s.ClientData = decoded.ClientData
+	s.Errors = decoded.Errors
+	s.State = decoded.State
+	s.TaskType = decoded.TaskType
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["commands"]; ok { // each element's concrete type depends on its own discriminator
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]CommandProperties, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalCommandPropertiesImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Commands = &output
+	}
+
+	if v, ok := temp["output"]; ok { // each element's concrete type depends on its "resultType"
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]MigrateMySqlAzureDbForMySqlSyncTaskOutput, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Output = &output
+	}
+
+	return nil
+}
diff --git
a/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go
new file mode 100644
index 00000000000..12b2dc5ef48
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go
@@ -0,0 +1,121 @@
+package customoperation
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ ProjectTaskProperties = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} // compile-time interface assertion
+
+type MigrateOracleAzureDbForPostgreSqlSyncTaskProperties struct { // ProjectTaskProperties variant for taskType "Migrate.Oracle.AzureDbForPostgreSql.Sync"
+	Input  *MigrateOracleAzureDbPostgreSqlSyncTaskInput    `json:"input,omitempty"`
+	Output *[]MigrateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"`
+
+	// Fields inherited from ProjectTaskProperties
+
+	ClientData *map[string]string   `json:"clientData,omitempty"`
+	Commands   *[]CommandProperties `json:"commands,omitempty"`
+	Errors     *[]ODataError        `json:"errors,omitempty"`
+	State      *TaskState           `json:"state,omitempty"`
+	TaskType   TaskType             `json:"taskType"`
+}
+
+func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { // projects the shared (base) fields
+	return BaseProjectTaskPropertiesImpl{
+		ClientData: s.ClientData,
+		Commands:   s.Commands,
+		Errors:     s.Errors,
+		State:      s.State,
+		TaskType:   s.TaskType,
+	}
+}
+
+var _ json.Marshaler = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{}
+
+func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { // forces the constant "taskType" discriminator into the encoded object
+	type wrapper MigrateOracleAzureDbForPostgreSqlSyncTaskProperties // defined type drops the method set, preventing MarshalJSON recursion
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
+	}
+
+	decoded["taskType"] = "Migrate.Oracle.AzureDbForPostgreSql.Sync"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
+
+var _ json.Unmarshaler = &MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{}
+
+func (s *MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { // decodes plain fields directly, then resolves polymorphic "commands"/"output" elements by discriminator
+	var decoded struct {
+		Input      *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"`
+		ClientData *map[string]string                           `json:"clientData,omitempty"`
+		Errors     *[]ODataError                                `json:"errors,omitempty"`
+		State      *TaskState                                   `json:"state,omitempty"`
+		TaskType   TaskType                                     `json:"taskType"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.Input = decoded.Input
+	s.ClientData = decoded.ClientData
+	s.Errors = decoded.Errors
+	s.State = decoded.State
+	s.TaskType = decoded.TaskType
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["commands"]; ok { // each element's concrete type depends on its own discriminator
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]CommandProperties, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalCommandPropertiesImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Commands = &output
+	}
+
+	if v, ok := temp["output"]; ok { // each element's concrete type depends on its "resultType"
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]MigrateOracleAzureDbPostgreSqlSyncTaskOutput, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Output = &output
+	}
+
+	return nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go
new file mode 100644
index 00000000000..0e705b094cf
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go
@@ -0,0 +1,15 @@
+package customoperation
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +type MigrateOracleAzureDbPostgreSqlSyncDatabaseInput struct { + CaseManipulation *string `json:"caseManipulation,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..59cd6a3041c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskinput.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateOracleAzureDbPostgreSqlSyncTaskInput struct { + SelectedDatabases []MigrateOracleAzureDbPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..a5d397592b9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutput interface { + MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +type BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +// RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. 
this cannot be used as a Request Payload). +type RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + migrateOracleAzureDbPostgreSqlSyncTaskOutput BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s.migrateOracleAzureDbPostgreSqlSyncTaskOutput +} + +func UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(input []byte) (MigrateOracleAzureDbPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + migrateOracleAzureDbPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..808a3304302 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..bac1ac398eb --- 
/dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..9c5d6b54ec3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..27ed75db263 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ 
+package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, 
fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..6b7c5adc457 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..86a08c5d79a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go @@ -0,0 +1,14 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]interface{} `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SelectedTables *[]MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput `json:"selectedTables,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go new file mode 100644 index 00000000000..6fcae0bbb2c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go @@ -0,0 +1,8 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput struct { + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..7a64863b036 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,30 @@ +package customoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..e39f7e9c3a0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput interface { + MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +type BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +// RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s.migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput +} + +func UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(input []byte) (MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if 
strings.EqualFold(value, "MigrationLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..3b45a7f72e2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..ff2c98f73bf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) 
MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..24dab73ec91 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go @@ -0,0 +1,53 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 
00000000000..ea0b4977de5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel struct { + DatabaseCount *float64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerType *ScenarioSource `json:"sourceServerType,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *ReplicateMigrationState `json:"state,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerType *ScenarioTarget `json:"targetServerType,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + 
wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..57b87beb271 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..b719ce9f771 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,130 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, 
nil +} + +var _ json.Unmarshaler = &MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list 
[]json.RawMessage: %+v", err) + } + + output := make([]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbdatabaseinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbdatabaseinput.go new file mode 100644 index 00000000000..23c202ef35a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbdatabaseinput.go @@ -0,0 +1,13 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbDatabaseInput struct { + Id *string `json:"id,omitempty"` + MakeSourceDbReadOnly *bool `json:"makeSourceDbReadOnly,omitempty"` + Name *string `json:"name,omitempty"` + SchemaSetting *interface{} `json:"schemaSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsyncdatabaseinput.go new file mode 100644 index 00000000000..5332c5e851d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskinput.go new file mode 100644 index 00000000000..dbd8ef520a5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskinput.go @@ -0,0 +1,11 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbSyncTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutput.go new file mode 100644 index 00000000000..79b9f364a9f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutput.go @@ -0,0 +1,108 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncTaskOutput interface { + MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = RawMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + migrateSqlServerSqlDbSyncTaskOutput BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s.migrateSqlServerSqlDbSyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbSyncTaskOutputImpl{ + migrateSqlServerSqlDbSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..7c4fc965308 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..c424294b7e5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ 
+package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, 
err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputerror.go new file mode 100644 index 00000000000..ff5e3e40bce --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputError{} + +type MigrateSqlServerSqlDbSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..ccb2b869ac1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go @@ -0,0 +1,58 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..1df03e4a8b5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: 
s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..8a9026ab31e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbsynctaskproperties.go @@ -0,0 +1,121 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateSqlServerSqlDbSyncTaskProperties{} + +type MigrateSqlServerSqlDbSyncTaskProperties struct { + Input *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlDbSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskProperties{} + +func (s MigrateSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbSyncTaskProperties{} + +func (s *MigrateSqlServerSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskinput.go new file mode 100644 index 
00000000000..e628b27bd9a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskinput.go @@ -0,0 +1,13 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigrateSqlServerSqlDbDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutput.go new file mode 100644 index 00000000000..b4194039baa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutput.go @@ -0,0 +1,116 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbTaskOutput interface { + MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbTaskOutput = BaseMigrateSqlServerSqlDbTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbTaskOutput = RawMigrateSqlServerSqlDbTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlDbTaskOutputImpl struct { + migrateSqlServerSqlDbTaskOutput BaseMigrateSqlServerSqlDbTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s.migrateSqlServerSqlDbTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "MigrationDatabaseLevelValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbTaskOutputImpl{ + migrateSqlServerSqlDbTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputdatabaselevel.go new file mode 100644 index 00000000000..e5c65363dd0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputdatabaselevel.go @@ -0,0 +1,65 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +func (s 
MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go new file mode 100644 index 00000000000..0b15f82c375 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go @@ -0,0 +1,60 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult struct { + DataIntegrityValidationResult *DataIntegrityValidationResult `json:"dataIntegrityValidationResult,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + QueryAnalysisValidationResult *QueryAnalysisValidationResult `json:"queryAnalysisValidationResult,omitempty"` + SchemaValidationResult *SchemaComparisonValidationResult `json:"schemaValidationResult,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationDatabaseLevelValidationOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputerror.go new file mode 100644 index 00000000000..c8889d13f41 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputerror.go @@ -0,0 +1,52 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputError{} + +type MigrateSqlServerSqlDbTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputError) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputError{} + +func (s MigrateSqlServerSqlDbTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputmigrationlevel.go new file mode 100644 index 00000000000..ee4fa98fa26 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + MigrationValidationResult *MigrationValidationResult `json:"migrationValidationResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string 
`json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputtablelevel.go new file mode 100644 index 00000000000..08d70aa58f3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputtablelevel.go @@ -0,0 +1,60 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputvalidationresult.go 
b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputvalidationresult.go new file mode 100644 index 00000000000..3aff147d09d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskoutputvalidationresult.go @@ -0,0 +1,54 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputValidationResult struct { + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationValidationOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskproperties.go new file mode 100644 index 00000000000..0b8ad4a189a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqldbtaskproperties.go @@ -0,0 +1,130 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlDbTaskProperties{} + +type MigrateSqlServerSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlDbTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskProperties{} + +func (s MigrateSqlServerSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskProperties + wrapped := 
wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbTaskProperties{} + +func (s *MigrateSqlServerSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val 
:= range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmidatabaseinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmidatabaseinput.go new file mode 100644 index 00000000000..2178922e271 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmidatabaseinput.go @@ -0,0 +1,12 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// MigrateSqlServerSqlMIDatabaseInput identifies one source database and the
// name it should be restored as on the target SQL Managed Instance.
// Name and RestoreDatabaseName are required by the API (no omitempty).
type MigrateSqlServerSqlMIDatabaseInput struct {
	BackupFilePaths     *[]string  `json:"backupFilePaths,omitempty"`
	BackupFileShare     *FileShare `json:"backupFileShare,omitempty"`
	Id                  *string    `json:"id,omitempty"`
	Name                string     `json:"name"`
	RestoreDatabaseName string     `json:"restoreDatabaseName"`
}

// MigrateSqlServerSqlMISyncTaskInput is the input payload for the
// SQL Server -> SQL Managed Instance sync (LRS) migration task.
// AzureApp, SelectedDatabases, connection info and StorageResourceId are
// required by the API; the remaining fields are optional.
type MigrateSqlServerSqlMISyncTaskInput struct {
	AzureApp                           AzureActiveDirectoryApp              `json:"azureApp"`
	BackupFileShare                    *FileShare                           `json:"backupFileShare,omitempty"`
	NumberOfParallelDatabaseMigrations *float64                             `json:"numberOfParallelDatabaseMigrations,omitempty"`
	SelectedDatabases                  []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"`
	SourceConnectionInfo               SqlConnectionInfo                    `json:"sourceConnectionInfo"`
	StorageResourceId                  string                               `json:"storageResourceId"`
	TargetConnectionInfo               MiSqlConnectionInfo                  `json:"targetConnectionInfo"`
}
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskOutput interface { + MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = BaseMigrateSqlServerSqlMISyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlMISyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = RawMigrateSqlServerSqlMISyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlMISyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlMISyncTaskOutputImpl struct { + migrateSqlServerSqlMISyncTaskOutput BaseMigrateSqlServerSqlMISyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s.migrateSqlServerSqlMISyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlMISyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMISyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMISyncTaskOutputImpl: %+v", err) + } + + return 
RawMigrateSqlServerSqlMISyncTaskOutputImpl{ + migrateSqlServerSqlMISyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..2ae6a2775ba --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go @@ -0,0 +1,62 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel struct { + ActiveBackupSets *[]BackupSetInfo `json:"activeBackupSets,omitempty"` + ContainerName *string `json:"containerName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FullBackupSetInfo *BackupSetInfo `json:"fullBackupSetInfo,omitempty"` + IsFullBackupRestored *bool `json:"isFullBackupRestored,omitempty"` + LastRestoredBackupSetInfo *BackupSetInfo `json:"lastRestoredBackupSetInfo,omitempty"` + MigrationState *DatabaseMigrationState `json:"migrationState,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MigrateSqlServerSqlMISyncTaskOutput() 
BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmisynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmisynctaskoutputerror.go new file mode 100644 index 00000000000..cdf65791f4d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmisynctaskoutputerror.go @@ -0,0 +1,52 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputError{} + +type MigrateSqlServerSqlMISyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputError{} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..77a7cf3fd7e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go @@ -0,0 +1,62 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + DatabaseErrorCount *int64 `json:"databaseErrorCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerName *string `json:"sourceServerName,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerName *string `json:"targetServerName,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = 
"MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..354e78e553d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmisynctaskproperties.go @@ -0,0 +1,124 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlMISyncTaskProperties{} + +type MigrateSqlServerSqlMISyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskProperties{} + +func (s MigrateSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) 
+ encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMISyncTaskProperties{} + +func (s *MigrateSqlServerSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return 
fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMISyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskinput.go new file mode 100644 index 00000000000..1b238962d33 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskinput.go @@ -0,0 +1,18 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// MigrateSqlServerSqlMITaskInput is the input payload for the offline
// SQL Server -> SQL Managed Instance migration task. BackupBlobShare,
// SelectedDatabases and both connection infos are required by the API;
// the remaining fields are optional.
type MigrateSqlServerSqlMITaskInput struct {
	AadDomainName               *string                              `json:"aadDomainName,omitempty"`
	BackupBlobShare             BlobShare                            `json:"backupBlobShare"`
	BackupFileShare             *FileShare                           `json:"backupFileShare,omitempty"`
	BackupMode                  *BackupMode                          `json:"backupMode,omitempty"`
	EncryptedKeyForSecureFields *string                              `json:"encryptedKeyForSecureFields,omitempty"`
	SelectedAgentJobs           *[]string                            `json:"selectedAgentJobs,omitempty"`
	SelectedDatabases           []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"`
	SelectedLogins              *[]string                            `json:"selectedLogins,omitempty"`
	SourceConnectionInfo        SqlConnectionInfo                    `json:"sourceConnectionInfo"`
	StartedOn                   *string                              `json:"startedOn,omitempty"`
	TargetConnectionInfo        SqlConnectionInfo                    `json:"targetConnectionInfo"`
}
+ +type MigrateSqlServerSqlMITaskOutput interface { + MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl +} + +var _ MigrateSqlServerSqlMITaskOutput = BaseMigrateSqlServerSqlMITaskOutputImpl{} + +type BaseMigrateSqlServerSqlMITaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMITaskOutput = RawMigrateSqlServerSqlMITaskOutputImpl{} + +// RawMigrateSqlServerSqlMITaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlMITaskOutputImpl struct { + migrateSqlServerSqlMITaskOutput BaseMigrateSqlServerSqlMITaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s.migrateSqlServerSqlMITaskOutput +} + +func UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(input []byte) (MigrateSqlServerSqlMITaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMITaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMITaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMITaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlMITaskOutputImpl{ + migrateSqlServerSqlMITaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputagentjoblevel.go new file mode 100644 index 00000000000..76200e4ff3c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) 
+ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +type MigrateSqlServerSqlMITaskOutputAgentJobLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + Message *string `json:"message,omitempty"` + Name *string `json:"name,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputdatabaselevel.go new file mode 100644 index 00000000000..11e7ef2f16f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputdatabaselevel.go @@ -0,0 +1,59 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMITaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: 
%+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputerror.go new file mode 100644 index 00000000000..c7ec7523b0b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputerror.go @@ -0,0 +1,52 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputError{} + +type MigrateSqlServerSqlMITaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputError) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputError{} + +func (s MigrateSqlServerSqlMITaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputloginlevel.go new file mode 100644 index 00000000000..c2a3c98936e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputloginlevel.go @@ -0,0 +1,58 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +type MigrateSqlServerSqlMITaskOutputLoginLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LoginName *string `json:"loginName,omitempty"` + Message *string `json:"message,omitempty"` + Stage *LoginMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputLoginLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputmigrationlevel.go new file mode 100644 index 
00000000000..103c0b7f764 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMITaskOutputMigrationLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + Message *string `json:"message,omitempty"` + OrphanedUsersInfo *[]OrphanedUserInfo `json:"orphanedUsersInfo,omitempty"` + ServerRoleResults *map[string]StartMigrationScenarioServerRoleResult `json:"serverRoleResults,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +func (s 
MigrateSqlServerSqlMITaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..eff948991f0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesqlserversqlmitaskproperties.go @@ -0,0 +1,133 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateSqlServerSqlMITaskProperties{} + +type MigrateSqlServerSqlMITaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlMITaskOutput `json:"output,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskProperties{} + +func (s MigrateSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMITaskProperties{} + +func (s *MigrateSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + 
CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.ParentTaskId = decoded.ParentTaskId + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMITaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(val) + 
if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskinput.go new file mode 100644 index 00000000000..b8932cd1be6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskinput.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSsisTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + SsisMigrationInfo SsisMigrationInfo `json:"ssisMigrationInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskoutput.go new file mode 100644 index 00000000000..b4a941401c8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskoutput.go @@ -0,0 +1,84 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSsisTaskOutput interface { + MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl +} + +var _ MigrateSsisTaskOutput = BaseMigrateSsisTaskOutputImpl{} + +type BaseMigrateSsisTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return s +} + +var _ MigrateSsisTaskOutput = RawMigrateSsisTaskOutputImpl{} + +// RawMigrateSsisTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSsisTaskOutputImpl struct { + migrateSsisTaskOutput BaseMigrateSsisTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return s.migrateSsisTaskOutput +} + +func UnmarshalMigrateSsisTaskOutputImplementation(input []byte) (MigrateSsisTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSsisTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SsisProjectLevelOutput") { + var out MigrateSsisTaskOutputProjectLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputProjectLevel: 
%+v", err) + } + return out, nil + } + + var parent BaseMigrateSsisTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSsisTaskOutputImpl: %+v", err) + } + + return RawMigrateSsisTaskOutputImpl{ + migrateSsisTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskoutputmigrationlevel.go new file mode 100644 index 00000000000..e5631fd6fd3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputMigrationLevel{} + +type MigrateSsisTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputMigrationLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return 
BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputMigrationLevel{} + +func (s MigrateSsisTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskoutputprojectlevel.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskoutputprojectlevel.go new file mode 100644 index 00000000000..20edc6eccae --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskoutputprojectlevel.go @@ -0,0 +1,59 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputProjectLevel{} + +type MigrateSsisTaskOutputProjectLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FolderName *string `json:"folderName,omitempty"` + Message *string `json:"message,omitempty"` + ProjectName *string `json:"projectName,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputProjectLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputProjectLevel{} + +func (s MigrateSsisTaskOutputProjectLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputProjectLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + decoded["resultType"] = "SsisProjectLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskproperties.go new file mode 100644 index 00000000000..a7137f40ea7 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/customoperation/model_migratessistaskproperties.go @@ -0,0 +1,121 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSsisTaskProperties{} + +type MigrateSsisTaskProperties struct { + Input *MigrateSsisTaskInput `json:"input,omitempty"` + Output *[]MigrateSsisTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSsisTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSsisTaskProperties{} + +func (s MigrateSsisTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Ssis" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSsisTaskProperties{} + +func (s *MigrateSsisTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input 
*MigrateSsisTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSsisTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSsisTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSsisTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesynccompletecommandinput.go 
b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesynccompletecommandinput.go new file mode 100644 index 00000000000..b559284aa32 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesynccompletecommandinput.go @@ -0,0 +1,27 @@ +package customoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSyncCompleteCommandInput struct { + CommitTimeStamp *string `json:"commitTimeStamp,omitempty"` + DatabaseName string `json:"databaseName"` +} + +func (o *MigrateSyncCompleteCommandInput) GetCommitTimeStampAsTime() (*time.Time, error) { + if o.CommitTimeStamp == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CommitTimeStamp, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateSyncCompleteCommandInput) SetCommitTimeStampAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CommitTimeStamp = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesynccompletecommandoutput.go new file mode 100644 index 00000000000..f8ad7c32c57 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesynccompletecommandoutput.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` + Id *string `json:"id,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migratesynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesynccompletecommandproperties.go new file mode 100644 index 00000000000..380e3facfbe --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migratesynccompletecommandproperties.go @@ -0,0 +1,56 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ CommandProperties = MigrateSyncCompleteCommandProperties{} + +type MigrateSyncCompleteCommandProperties struct { + CommandId *string `json:"commandId,omitempty"` + Input *MigrateSyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateSyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateSyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateSyncCompleteCommandProperties{} + +func (s MigrateSyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, 
fmt.Errorf("unmarshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.Sync.Complete.Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migrationeligibilityinfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrationeligibilityinfo.go new file mode 100644 index 00000000000..df4245588be --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrationeligibilityinfo.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationEligibilityInfo struct { + IsEligibleForMigration *bool `json:"isEligibleForMigration,omitempty"` + ValidationMessages *[]string `json:"validationMessages,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migrationreportresult.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrationreportresult.go new file mode 100644 index 00000000000..ae2df73d161 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrationreportresult.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationReportResult struct { + Id *string `json:"id,omitempty"` + ReportURL *string `json:"reportUrl,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migrationvalidationdatabasesummaryresult.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrationvalidationdatabasesummaryresult.go new file mode 100644 index 00000000000..127c8bf6d99 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrationvalidationdatabasesummaryresult.go @@ -0,0 +1,44 @@ +package customoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationValidationDatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} + +func (o *MigrationValidationDatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *MigrationValidationDatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := 
input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migrationvalidationoptions.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrationvalidationoptions.go new file mode 100644 index 00000000000..a94144318ea --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrationvalidationoptions.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationValidationOptions struct { + EnableDataIntegrityValidation *bool `json:"enableDataIntegrityValidation,omitempty"` + EnableQueryAnalysisValidation *bool `json:"enableQueryAnalysisValidation,omitempty"` + EnableSchemaValidation *bool `json:"enableSchemaValidation,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_migrationvalidationresult.go b/resource-manager/datamigration/2025-06-30/customoperation/model_migrationvalidationresult.go new file mode 100644 index 00000000000..4ce3a2753d9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_migrationvalidationresult.go @@ -0,0 +1,11 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationValidationResult struct { + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_misqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_misqlconnectioninfo.go new file mode 100644 index 00000000000..756f47b192a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_misqlconnectioninfo.go @@ -0,0 +1,54 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MiSqlConnectionInfo{} + +type MiSqlConnectionInfo struct { + ManagedInstanceResourceId string `json:"managedInstanceResourceId"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MiSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MiSqlConnectionInfo{} + +func (s MiSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MiSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MiSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MiSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MiSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, 
fmt.Errorf("re-marshaling MiSqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbclusterinfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbclusterinfo.go new file mode 100644 index 00000000000..0be815af2ce --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbclusterinfo.go @@ -0,0 +1,11 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbClusterInfo struct { + Databases []MongoDbDatabaseInfo `json:"databases"` + SupportsSharding bool `json:"supportsSharding"` + Type MongoDbClusterType `json:"type"` + Version string `json:"version"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbcollectioninfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbcollectioninfo.go new file mode 100644 index 00000000000..199a6afd823 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbcollectioninfo.go @@ -0,0 +1,19 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + DataSize int64 `json:"dataSize"` + DatabaseName string `json:"databaseName"` + DocumentCount int64 `json:"documentCount"` + IsCapped bool `json:"isCapped"` + IsSystemCollection bool `json:"isSystemCollection"` + IsView bool `json:"isView"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + ShardKey *MongoDbShardKeyInfo `json:"shardKey,omitempty"` + SupportsSharding bool `json:"supportsSharding"` + ViewOf *string `json:"viewOf,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbcollectionprogress.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbcollectionprogress.go new file mode 100644 index 00000000000..29b8e6960f7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbcollectionprogress.go @@ -0,0 +1,102 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbCollectionProgress{} + +type MongoDbCollectionProgress struct { + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbCollectionProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbCollectionProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbCollectionProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) 
SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbCollectionProgress{} + +func (s MongoDbCollectionProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbCollectionProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbCollectionProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbCollectionProgress: %+v", err) + } + + decoded["resultType"] = "Collection" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbCollectionProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbcollectionsettings.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbcollectionsettings.go new file mode 100644 index 00000000000..9a62fdc96b3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbcollectionsettings.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionSettings struct { + CanDelete *bool `json:"canDelete,omitempty"` + ShardKey *MongoDbShardKeySetting `json:"shardKey,omitempty"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbconnectioninfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbconnectioninfo.go new file mode 100644 index 00000000000..6183bc078d8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbconnectioninfo.go @@ -0,0 +1,64 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MongoDbConnectionInfo{} + +type MongoDbConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + ConnectionString string `json:"connectionString"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + EnforceSSL *bool `json:"enforceSSL,omitempty"` + Port *int64 `json:"port,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MongoDbConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MongoDbConnectionInfo{} + +func (s MongoDbConnectionInfo) MarshalJSON() ([]byte, error) { + 
type wrapper MongoDbConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbConnectionInfo: %+v", err) + } + + decoded["type"] = "mongoDbConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbdatabaseinfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbdatabaseinfo.go new file mode 100644 index 00000000000..d2f1c366974 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbdatabaseinfo.go @@ -0,0 +1,14 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbDatabaseInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + Collections []MongoDbCollectionInfo `json:"collections"` + DataSize int64 `json:"dataSize"` + DocumentCount int64 `json:"documentCount"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + SupportsSharding bool `json:"supportsSharding"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbdatabaseprogress.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbdatabaseprogress.go new file mode 100644 index 00000000000..35abb846849 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbdatabaseprogress.go @@ -0,0 +1,166 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbDatabaseProgress{} + +type MongoDbDatabaseProgress struct { + Collections *map[string]MongoDbProgress `json:"collections,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbDatabaseProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbDatabaseProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbDatabaseProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbDatabaseProgress{} + +func (s MongoDbDatabaseProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbDatabaseProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbDatabaseProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbDatabaseProgress: %+v", err) + } + + decoded["resultType"] = "Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbDatabaseProgress: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MongoDbDatabaseProgress{} + +func (s *MongoDbDatabaseProgress) UnmarshalJSON(bytes []byte) error { + var decoded struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.BytesCopied = decoded.BytesCopied + s.DocumentsCopied = decoded.DocumentsCopied + s.ElapsedTime = decoded.ElapsedTime + s.Errors = decoded.Errors + s.EventsPending = 
decoded.EventsPending + s.EventsReplayed = decoded.EventsReplayed + s.LastEventTime = decoded.LastEventTime + s.LastReplayTime = decoded.LastReplayTime + s.Name = decoded.Name + s.QualifiedName = decoded.QualifiedName + s.ResultType = decoded.ResultType + s.State = decoded.State + s.TotalBytes = decoded.TotalBytes + s.TotalDocuments = decoded.TotalDocuments + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MongoDbDatabaseProgress into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["collections"]; ok { + var dictionaryTemp map[string]json.RawMessage + if err := json.Unmarshal(v, &dictionaryTemp); err != nil { + return fmt.Errorf("unmarshaling Collections into dictionary map[string]json.RawMessage: %+v", err) + } + + output := make(map[string]MongoDbProgress) + for key, val := range dictionaryTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling key %q field 'Collections' for 'MongoDbDatabaseProgress': %+v", key, err) + } + output[key] = impl + } + s.Collections = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbdatabasesettings.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbdatabasesettings.go new file mode 100644 index 00000000000..40d31d6e142 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbdatabasesettings.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbDatabaseSettings struct { + Collections map[string]MongoDbCollectionSettings `json:"collections"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodberror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodberror.go new file mode 100644 index 00000000000..38a221bf547 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodberror.go @@ -0,0 +1,11 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbError struct { + Code *string `json:"code,omitempty"` + Count *int64 `json:"count,omitempty"` + Message *string `json:"message,omitempty"` + Type *MongoDbErrorType `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbmigrationprogress.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbmigrationprogress.go new file mode 100644 index 00000000000..7a567a8451c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbmigrationprogress.go @@ -0,0 +1,103 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbMigrationProgress{} + +type MongoDbMigrationProgress struct { + Databases *map[string]MongoDbDatabaseProgress `json:"databases,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbMigrationProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbMigrationProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbMigrationProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbMigrationProgress{} + +func (s MongoDbMigrationProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbMigrationProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbMigrationProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbMigrationProgress: %+v", err) + } + + decoded["resultType"] = "Migration" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbMigrationProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbmigrationsettings.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbmigrationsettings.go new file mode 100644 index 00000000000..f9d785be1f1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbmigrationsettings.go @@ -0,0 +1,13 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbMigrationSettings struct { + BoostRUs *int64 `json:"boostRUs,omitempty"` + Databases map[string]MongoDbDatabaseSettings `json:"databases"` + Replication *MongoDbReplication `json:"replication,omitempty"` + Source MongoDbConnectionInfo `json:"source"` + Target MongoDbConnectionInfo `json:"target"` + Throttling *MongoDbThrottlingSettings `json:"throttling,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbprogress.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbprogress.go new file mode 100644 index 00000000000..3ac1603f8a4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbprogress.go @@ -0,0 +1,104 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbProgress interface { + MongoDbProgress() BaseMongoDbProgressImpl +} + +var _ MongoDbProgress = BaseMongoDbProgressImpl{} + +type BaseMongoDbProgressImpl struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s BaseMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s +} + +var _ MongoDbProgress = RawMongoDbProgressImpl{} + +// RawMongoDbProgressImpl is 
returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMongoDbProgressImpl struct { + mongoDbProgress BaseMongoDbProgressImpl + Type string + Values map[string]interface{} +} + +func (s RawMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s.mongoDbProgress +} + +func UnmarshalMongoDbProgressImplementation(input []byte) (MongoDbProgress, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbProgress into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Collection") { + var out MongoDbCollectionProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbCollectionProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Database") { + var out MongoDbDatabaseProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbDatabaseProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migration") { + var out MongoDbMigrationProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbMigrationProgress: %+v", err) + } + return out, nil + } + + var parent BaseMongoDbProgressImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMongoDbProgressImpl: %+v", err) + } + + return RawMongoDbProgressImpl{ + mongoDbProgress: parent, + Type: value, + Values: temp, + }, nil + +} diff --git 
a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbshardkeyfield.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbshardkeyfield.go new file mode 100644 index 00000000000..8bb41d45aaf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbshardkeyfield.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbShardKeyField struct { + Name string `json:"name"` + Order MongoDbShardKeyOrder `json:"order"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbshardkeyinfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbshardkeyinfo.go new file mode 100644 index 00000000000..c95302169c0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbshardkeyinfo.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbShardKeyInfo struct { + Fields []MongoDbShardKeyField `json:"fields"` + IsUnique bool `json:"isUnique"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbshardkeysetting.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbshardkeysetting.go new file mode 100644 index 00000000000..0bc41865805 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbshardkeysetting.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbShardKeySetting struct { + Fields []MongoDbShardKeyField `json:"fields"` + IsUnique *bool `json:"isUnique,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbthrottlingsettings.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbthrottlingsettings.go new file mode 100644 index 00000000000..3339bf94595 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mongodbthrottlingsettings.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbThrottlingSettings struct { + MaxParallelism *int64 `json:"maxParallelism,omitempty"` + MinFreeCPU *int64 `json:"minFreeCpu,omitempty"` + MinFreeMemoryMb *int64 `json:"minFreeMemoryMb,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_mysqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_mysqlconnectioninfo.go new file mode 100644 index 00000000000..29257dad126 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_mysqlconnectioninfo.go @@ -0,0 +1,59 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = MySqlConnectionInfo{} + +type MySqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Port int64 `json:"port"` + ServerName string `json:"serverName"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MySqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MySqlConnectionInfo{} + +func (s MySqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MySqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MySqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MySqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MySqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MySqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_nameavailabilityrequest.go b/resource-manager/datamigration/2025-06-30/customoperation/model_nameavailabilityrequest.go new file mode 100644 index 00000000000..46c2ac9e375 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_nameavailabilityrequest.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type NameAvailabilityRequest struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_nameavailabilityresponse.go b/resource-manager/datamigration/2025-06-30/customoperation/model_nameavailabilityresponse.go new file mode 100644 index 00000000000..2e75629fd48 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_nameavailabilityresponse.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type NameAvailabilityResponse struct { + Message *string `json:"message,omitempty"` + NameAvailable *bool `json:"nameAvailable,omitempty"` + Reason *NameCheckFailureReason `json:"reason,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_odataerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_odataerror.go new file mode 100644 index 00000000000..991aa14536d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_odataerror.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ODataError struct { + Code *string `json:"code,omitempty"` + Details *[]ODataError `json:"details,omitempty"` + Message *string `json:"message,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_oracleconnectioninfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_oracleconnectioninfo.go new file mode 100644 index 00000000000..cae15d84d29 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_oracleconnectioninfo.go @@ -0,0 +1,58 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = OracleConnectionInfo{} + +type OracleConnectionInfo struct { + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource string `json:"dataSource"` + Port *int64 `json:"port,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s OracleConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = OracleConnectionInfo{} + +func (s OracleConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper OracleConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling OracleConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling OracleConnectionInfo: %+v", err) + } + + decoded["type"] = "OracleConnectionInfo" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling OracleConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_orphaneduserinfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_orphaneduserinfo.go new file mode 100644 index 00000000000..4a5b29638d4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_orphaneduserinfo.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type OrphanedUserInfo struct { + DatabaseName *string `json:"databaseName,omitempty"` + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_postgresqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_postgresqlconnectioninfo.go new file mode 100644 index 00000000000..0dfab30bebd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_postgresqlconnectioninfo.go @@ -0,0 +1,63 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = PostgreSqlConnectionInfo{} + +type PostgreSqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Port int64 `json:"port"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName string `json:"serverName"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s PostgreSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = PostgreSqlConnectionInfo{} + +func (s PostgreSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper PostgreSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling PostgreSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling PostgreSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "PostgreSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling PostgreSqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_projecttask.go b/resource-manager/datamigration/2025-06-30/customoperation/model_projecttask.go new file mode 100644 index 00000000000..3d5006b9f13 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/customoperation/model_projecttask.go @@ -0,0 +1,56 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectTask struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties ProjectTaskProperties `json:"properties"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} + +var _ json.Unmarshaler = &ProjectTask{} + +func (s *ProjectTask) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Etag = decoded.Etag + s.Id = decoded.Id + s.Name = decoded.Name + s.SystemData = decoded.SystemData + s.Type = decoded.Type + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ProjectTask into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["properties"]; ok { + impl, err := UnmarshalProjectTaskPropertiesImplementation(v) + if err != nil { + return fmt.Errorf("unmarshaling field 'Properties' for 'ProjectTask': %+v", err) + } + s.Properties = impl + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_projecttaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_projecttaskproperties.go new file mode 100644 index 00000000000..2274b1f199a --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/customoperation/model_projecttaskproperties.go @@ -0,0 +1,386 @@ +package customoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectTaskProperties interface { + ProjectTaskProperties() BaseProjectTaskPropertiesImpl +} + +var _ ProjectTaskProperties = BaseProjectTaskPropertiesImpl{} + +type BaseProjectTaskPropertiesImpl struct { + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s BaseProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return s +} + +var _ ProjectTaskProperties = RawProjectTaskPropertiesImpl{} + +// RawProjectTaskPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawProjectTaskPropertiesImpl struct { + projectTaskProperties BaseProjectTaskPropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return s.projectTaskProperties +} + +var _ json.Unmarshaler = &BaseProjectTaskPropertiesImpl{} + +func (s *BaseProjectTaskPropertiesImpl) UnmarshalJSON(bytes []byte) error { + var decoded struct { + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling BaseProjectTaskPropertiesImpl into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'BaseProjectTaskPropertiesImpl': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} + +func UnmarshalProjectTaskPropertiesImplementation(input []byte) (ProjectTaskProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ProjectTaskProperties into map[string]interface: %+v", err) + } + + var value 
string + if v, ok := temp["taskType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Connect.MongoDb") { + var out ConnectToMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.MySql") { + var out ConnectToSourceMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.Oracle.Sync") { + var out ConnectToSourceOracleSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.PostgreSql.Sync") { + var out ConnectToSourcePostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.SqlServer.Sync") { + var out ConnectToSourceSqlServerSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.SqlServer") { + var out ConnectToSourceSqlServerTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureDbForMySql") { + var out ConnectToTargetAzureDbForMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, 
fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureDbForPostgreSql.Sync") { + var out ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync") { + var out ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.SqlDb") { + var out ConnectToTargetSqlDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI.Sync.LRS") { + var out ConnectToTargetSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI") { + var out ConnectToTargetSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.SqlDb.Sync") { + var out ConnectToTargetSqlSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, 
"GetTDECertificates.Sql") { + var out GetTdeCertificatesSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetTdeCertificatesSqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesMySql") { + var out GetUserTablesMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesOracle") { + var out GetUserTablesOracleTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesOracleTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesPostgreSql") { + var out GetUserTablesPostgreSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTables.AzureSqlDb.Sync") { + var out GetUserTablesSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTables.Sql") { + var out GetUserTablesSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MongoDb") { + var out MigrateMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskProperties + 
if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql.Sync") { + var out MigrateMySqlAzureDbForMySqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Oracle.AzureDbForPostgreSql.Sync") { + var out MigrateOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDb.Sync") { + var out MigrateSqlServerSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.SqlDb") { + var out MigrateSqlServerSqlDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS") { + var out MigrateSqlServerSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI") { + var out MigrateSqlServerSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Ssis") { + var out MigrateSsisTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.SqlDb.Sync") { + var out ValidateMigrationInputSqlServerSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS") { + var out ValidateMigrationInputSqlServerSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI") { + var out ValidateMigrationInputSqlServerSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Validate.MongoDb") { + var out ValidateMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Validate.Oracle.AzureDbPostgreSql.Sync") { + var out 
ValidateOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + var parent BaseProjectTaskPropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseProjectTaskPropertiesImpl: %+v", err) + } + + return RawProjectTaskPropertiesImpl{ + projectTaskProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_queryanalysisvalidationresult.go b/resource-manager/datamigration/2025-06-30/customoperation/model_queryanalysisvalidationresult.go new file mode 100644 index 00000000000..3a9975c5bc1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_queryanalysisvalidationresult.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QueryAnalysisValidationResult struct { + QueryResults *QueryExecutionResult `json:"queryResults,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_queryexecutionresult.go b/resource-manager/datamigration/2025-06-30/customoperation/model_queryexecutionresult.go new file mode 100644 index 00000000000..bb4ef05ce25 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_queryexecutionresult.go @@ -0,0 +1,11 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type QueryExecutionResult struct { + QueryText *string `json:"queryText,omitempty"` + SourceResult *ExecutionStatistics `json:"sourceResult,omitempty"` + StatementsInBatch *int64 `json:"statementsInBatch,omitempty"` + TargetResult *ExecutionStatistics `json:"targetResult,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_reportableexception.go b/resource-manager/datamigration/2025-06-30/customoperation/model_reportableexception.go new file mode 100644 index 00000000000..14a405e6e6e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_reportableexception.go @@ -0,0 +1,13 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ReportableException struct { + ActionableMessage *string `json:"actionableMessage,omitempty"` + FilePath *string `json:"filePath,omitempty"` + HResult *int64 `json:"hResult,omitempty"` + LineNumber *string `json:"lineNumber,omitempty"` + Message *string `json:"message,omitempty"` + StackTrace *string `json:"stackTrace,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_schemacomparisonvalidationresult.go b/resource-manager/datamigration/2025-06-30/customoperation/model_schemacomparisonvalidationresult.go new file mode 100644 index 00000000000..9bdee9bd818 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_schemacomparisonvalidationresult.go @@ -0,0 +1,11 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SchemaComparisonValidationResult struct { + SchemaDifferences *SchemaComparisonValidationResultType `json:"schemaDifferences,omitempty"` + SourceDatabaseObjectCount *map[string]int64 `json:"sourceDatabaseObjectCount,omitempty"` + TargetDatabaseObjectCount *map[string]int64 `json:"targetDatabaseObjectCount,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_schemacomparisonvalidationresulttype.go b/resource-manager/datamigration/2025-06-30/customoperation/model_schemacomparisonvalidationresulttype.go new file mode 100644 index 00000000000..d08acd5afd1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_schemacomparisonvalidationresulttype.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SchemaComparisonValidationResultType struct { + ObjectName *string `json:"objectName,omitempty"` + ObjectType *ObjectType `json:"objectType,omitempty"` + UpdateAction *UpdateActionType `json:"updateAction,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_selectedcertificateinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_selectedcertificateinput.go new file mode 100644 index 00000000000..d5742b271fd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_selectedcertificateinput.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SelectedCertificateInput struct { + CertificateName string `json:"certificateName"` + Password string `json:"password"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_serverproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_serverproperties.go new file mode 100644 index 00000000000..738da9707d3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_serverproperties.go @@ -0,0 +1,13 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServerProperties struct { + ServerDatabaseCount *int64 `json:"serverDatabaseCount,omitempty"` + ServerEdition *string `json:"serverEdition,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerOperatingSystemVersion *string `json:"serverOperatingSystemVersion,omitempty"` + ServerPlatform *string `json:"serverPlatform,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_sqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_sqlconnectioninfo.go new file mode 100644 index 00000000000..c40a5118908 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_sqlconnectioninfo.go @@ -0,0 +1,64 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = SqlConnectionInfo{} + +type SqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource string `json:"dataSource"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Platform *SqlSourcePlatform `json:"platform,omitempty"` + Port *int64 `json:"port,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s SqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = SqlConnectionInfo{} + +func (s SqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper SqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling SqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling SqlConnectionInfo: %+v", err) + } + + decoded["type"] = "SqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling SqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_sqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_sqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..e6837b57e1a --- 
/dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_sqlserversqlmisynctaskinput.go @@ -0,0 +1,13 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_ssismigrationinfo.go b/resource-manager/datamigration/2025-06-30/customoperation/model_ssismigrationinfo.go new file mode 100644 index 00000000000..01665bbf440 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_ssismigrationinfo.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SsisMigrationInfo struct { + EnvironmentOverwriteOption *SsisMigrationOverwriteOption `json:"environmentOverwriteOption,omitempty"` + ProjectOverwriteOption *SsisMigrationOverwriteOption `json:"projectOverwriteOption,omitempty"` + SsisStoreType *SsisStoreType `json:"ssisStoreType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_startmigrationscenarioserverroleresult.go b/resource-manager/datamigration/2025-06-30/customoperation/model_startmigrationscenarioserverroleresult.go new file mode 100644 index 00000000000..ba0af741bba --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_startmigrationscenarioserverroleresult.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type StartMigrationScenarioServerRoleResult struct { + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Name *string `json:"name,omitempty"` + State *MigrationState `json:"state,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_syncmigrationdatabaseerrorevent.go b/resource-manager/datamigration/2025-06-30/customoperation/model_syncmigrationdatabaseerrorevent.go new file mode 100644 index 00000000000..ed07058797b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_syncmigrationdatabaseerrorevent.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SyncMigrationDatabaseErrorEvent struct { + EventText *string `json:"eventText,omitempty"` + EventTypeString *string `json:"eventTypeString,omitempty"` + TimestampString *string `json:"timestampString,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..0f2711b6611 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlDbSyncTaskProperties struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) 
MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return 
fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..36150608c1a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMISyncTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..290743ab4d3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmisynctaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlMISyncTaskProperties struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s 
*ValidateMigrationInputSqlServerSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmitaskinput.go new file mode 100644 index 00000000000..e6c621ebba3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmitaskinput.go 
@@ -0,0 +1,14 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMITaskInput struct { + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..a9133981eee --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmitaskoutput.go @@ -0,0 +1,15 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateMigrationInputSqlServerSqlMITaskOutput struct { + BackupFolderErrors *[]ReportableException `json:"backupFolderErrors,omitempty"` + BackupShareCredentialsErrors *[]ReportableException `json:"backupShareCredentialsErrors,omitempty"` + BackupStorageAccountErrors *[]ReportableException `json:"backupStorageAccountErrors,omitempty"` + DatabaseBackupInfo *DatabaseBackupInfo `json:"databaseBackupInfo,omitempty"` + ExistingBackupErrors *[]ReportableException `json:"existingBackupErrors,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + RestoreDatabaseNameErrors *[]ReportableException `json:"restoreDatabaseNameErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..4af61b870f8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemigrationinputsqlserversqlmitaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +type ValidateMigrationInputSqlServerSqlMITaskProperties struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var 
decoded struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_validatemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemongodbtaskproperties.go new file mode 100644 index 00000000000..8a0c97f2795 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_validatemongodbtaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ValidateMongoDbTaskProperties{} + +type ValidateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMongoDbTaskProperties{} + +func (s ValidateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMongoDbTaskProperties{} + +func (s *ValidateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + ClientData *map[string]string 
`json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_validateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/customoperation/model_validateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..fc79443c7ca --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_validateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package customoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ValidateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.Oracle.AzureDbPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { 
+ var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_validateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_validateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..ef768a6e11a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_validateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,8 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateOracleAzureDbPostgreSqlSyncTaskOutput struct { + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_validatesyncmigrationinputsqlservertaskinput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_validatesyncmigrationinputsqlservertaskinput.go new file mode 100644 index 00000000000..4b6a378e9f2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_validatesyncmigrationinputsqlservertaskinput.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateSyncMigrationInputSqlServerTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_validatesyncmigrationinputsqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/customoperation/model_validatesyncmigrationinputsqlservertaskoutput.go new file mode 100644 index 00000000000..b927fd7cb07 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_validatesyncmigrationinputsqlservertaskoutput.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateSyncMigrationInputSqlServerTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_validationerror.go b/resource-manager/datamigration/2025-06-30/customoperation/model_validationerror.go new file mode 100644 index 00000000000..d70b8c464f2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_validationerror.go @@ -0,0 +1,9 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidationError struct { + Severity *Severity `json:"severity,omitempty"` + Text *string `json:"text,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/model_waitstatistics.go b/resource-manager/datamigration/2025-06-30/customoperation/model_waitstatistics.go new file mode 100644 index 00000000000..3ccd65e945a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/model_waitstatistics.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type WaitStatistics struct { + WaitCount *int64 `json:"waitCount,omitempty"` + WaitTimeMs *float64 `json:"waitTimeMs,omitempty"` + WaitType *string `json:"waitType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/customoperation/version.go b/resource-manager/datamigration/2025-06-30/customoperation/version.go new file mode 100644 index 00000000000..717c854372d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/customoperation/version.go @@ -0,0 +1,10 @@ +package customoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/customoperation/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/README.md b/resource-manager/datamigration/2025-06-30/databasemigrations/README.md new file mode 100644 index 00000000000..8b086c68e23 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/README.md @@ -0,0 +1,369 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/databasemigrations` Documentation + +The `databasemigrations` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/databasemigrations" +``` + + +### Client Initialization + +```go +client := databasemigrations.NewDatabaseMigrationsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `DatabaseMigrationsClient.MongoToCosmosDbRUMongoCreate` + +```go +ctx := context.TODO() +id := databasemigrations.NewDatabaseAccountProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "databaseAccountName", "databaseMigrationName") + +payload := databasemigrations.DatabaseMigrationCosmosDbMongo{ + // ... 
+} + + +if err := client.MongoToCosmosDbRUMongoCreateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.MongoToCosmosDbRUMongoDelete` + +```go +ctx := context.TODO() +id := databasemigrations.NewDatabaseAccountProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "databaseAccountName", "databaseMigrationName") + +if err := client.MongoToCosmosDbRUMongoDeleteThenPoll(ctx, id, databasemigrations.DefaultMongoToCosmosDbRUMongoDeleteOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.MongoToCosmosDbRUMongoGet` + +```go +ctx := context.TODO() +id := databasemigrations.NewDatabaseAccountProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "databaseAccountName", "databaseMigrationName") + +read, err := client.MongoToCosmosDbRUMongoGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `DatabaseMigrationsClient.MongoToCosmosDbRUMongoGetForScope` + +```go +ctx := context.TODO() +id := databasemigrations.NewDatabaseAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "databaseAccountName") + +// alternatively `client.MongoToCosmosDbRUMongoGetForScope(ctx, id)` can be used to do batched pagination +items, err := client.MongoToCosmosDbRUMongoGetForScopeComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `DatabaseMigrationsClient.MongoToCosmosDbvCoreMongoCreate` + +```go +ctx := context.TODO() +id := databasemigrations.NewMongoClusterProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "mongoClusterName", "databaseMigrationName") + +payload := databasemigrations.DatabaseMigrationCosmosDbMongo{ + 
// ... +} + + +if err := client.MongoToCosmosDbvCoreMongoCreateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.MongoToCosmosDbvCoreMongoDelete` + +```go +ctx := context.TODO() +id := databasemigrations.NewMongoClusterProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "mongoClusterName", "databaseMigrationName") + +if err := client.MongoToCosmosDbvCoreMongoDeleteThenPoll(ctx, id, databasemigrations.DefaultMongoToCosmosDbvCoreMongoDeleteOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.MongoToCosmosDbvCoreMongoGet` + +```go +ctx := context.TODO() +id := databasemigrations.NewMongoClusterProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "mongoClusterName", "databaseMigrationName") + +read, err := client.MongoToCosmosDbvCoreMongoGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `DatabaseMigrationsClient.MongoToCosmosDbvCoreMongoGetForScope` + +```go +ctx := context.TODO() +id := databasemigrations.NewMongoClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "mongoClusterName") + +// alternatively `client.MongoToCosmosDbvCoreMongoGetForScope(ctx, id)` can be used to do batched pagination +items, err := client.MongoToCosmosDbvCoreMongoGetForScopeComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlDbCreateOrUpdate` + +```go +ctx := context.TODO() +id := databasemigrations.NewDatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "serverName", "databaseMigrationName") + +payload := databasemigrations.DatabaseMigrationSqlDb{ + // ... 
+} + + +if err := client.SqlDbCreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlDbDelete` + +```go +ctx := context.TODO() +id := databasemigrations.NewDatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "serverName", "databaseMigrationName") + +if err := client.SqlDbDeleteThenPoll(ctx, id, databasemigrations.DefaultSqlDbDeleteOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlDbGet` + +```go +ctx := context.TODO() +id := databasemigrations.NewDatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "serverName", "databaseMigrationName") + +read, err := client.SqlDbGet(ctx, id, databasemigrations.DefaultSqlDbGetOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlDbcancel` + +```go +ctx := context.TODO() +id := databasemigrations.NewDatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "serverName", "databaseMigrationName") + +payload := databasemigrations.MigrationOperationInput{ + // ... +} + + +if err := client.SqlDbcancelThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlDbretry` + +```go +ctx := context.TODO() +id := databasemigrations.NewDatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "serverName", "databaseMigrationName") + +payload := databasemigrations.MigrationOperationInput{ + // ... 
+} + + +if err := client.SqlDbretryThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlMiCreateOrUpdate` + +```go +ctx := context.TODO() +id := databasemigrations.NewProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedInstanceName", "databaseMigrationName") + +payload := databasemigrations.DatabaseMigrationSqlMi{ + // ... +} + + +if err := client.SqlMiCreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlMiDelete` + +```go +ctx := context.TODO() +id := databasemigrations.NewProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedInstanceName", "databaseMigrationName") + +if err := client.SqlMiDeleteThenPoll(ctx, id, databasemigrations.DefaultSqlMiDeleteOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlMiGet` + +```go +ctx := context.TODO() +id := databasemigrations.NewProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedInstanceName", "databaseMigrationName") + +read, err := client.SqlMiGet(ctx, id, databasemigrations.DefaultSqlMiGetOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlMicancel` + +```go +ctx := context.TODO() +id := databasemigrations.NewProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedInstanceName", "databaseMigrationName") + +payload := databasemigrations.MigrationOperationInput{ + // ... 
+} + + +if err := client.SqlMicancelThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlMicutover` + +```go +ctx := context.TODO() +id := databasemigrations.NewProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedInstanceName", "databaseMigrationName") + +payload := databasemigrations.MigrationOperationInput{ + // ... +} + + +if err := client.SqlMicutoverThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlVMCreateOrUpdate` + +```go +ctx := context.TODO() +id := databasemigrations.NewSqlVirtualMachineProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlVirtualMachineName", "databaseMigrationName") + +payload := databasemigrations.DatabaseMigrationSqlVM{ + // ... +} + + +if err := client.SqlVMCreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlVMGet` + +```go +ctx := context.TODO() +id := databasemigrations.NewSqlVirtualMachineProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlVirtualMachineName", "databaseMigrationName") + +read, err := client.SqlVMGet(ctx, id, databasemigrations.DefaultSqlVMGetOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlVMcancel` + +```go +ctx := context.TODO() +id := databasemigrations.NewSqlVirtualMachineProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlVirtualMachineName", "databaseMigrationName") + +payload := databasemigrations.MigrationOperationInput{ + // ... 
+} + + +if err := client.SqlVMcancelThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `DatabaseMigrationsClient.SqlVMcutover` + +```go +ctx := context.TODO() +id := databasemigrations.NewSqlVirtualMachineProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlVirtualMachineName", "databaseMigrationName") + +payload := databasemigrations.MigrationOperationInput{ + // ... +} + + +if err := client.SqlVMcutoverThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/client.go b/resource-manager/datamigration/2025-06-30/databasemigrations/client.go new file mode 100644 index 00000000000..8b027441078 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/client.go @@ -0,0 +1,26 @@ +package databasemigrations + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseMigrationsClient struct { + Client *resourcemanager.Client +} + +func NewDatabaseMigrationsClientWithBaseURI(sdkApi sdkEnv.Api) (*DatabaseMigrationsClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "databasemigrations", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating DatabaseMigrationsClient: %+v", err) + } + + return &DatabaseMigrationsClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/constants.go b/resource-manager/datamigration/2025-06-30/databasemigrations/constants.go new file mode 100644 index 00000000000..55341d28ff5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/constants.go @@ -0,0 +1,198 @@ +package databasemigrations + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AuthType string + +const ( + AuthTypeAccountKey AuthType = "AccountKey" + AuthTypeManagedIdentity AuthType = "ManagedIdentity" +) + +func PossibleValuesForAuthType() []string { + return []string{ + string(AuthTypeAccountKey), + string(AuthTypeManagedIdentity), + } +} + +func (s *AuthType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthType(input string) (*AuthType, error) { + vals := map[string]AuthType{ + "accountkey": AuthTypeAccountKey, + "managedidentity": AuthTypeManagedIdentity, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthType(input) + return &out, nil +} + +type MongoMigrationStatus string + +const ( + MongoMigrationStatusCanceled MongoMigrationStatus = "Canceled" + MongoMigrationStatusCompleted MongoMigrationStatus = "Completed" + MongoMigrationStatusFailed MongoMigrationStatus = "Failed" + MongoMigrationStatusInProgress MongoMigrationStatus = "InProgress" + MongoMigrationStatusNotStarted MongoMigrationStatus = "NotStarted" +) + +func PossibleValuesForMongoMigrationStatus() []string { + return []string{ + string(MongoMigrationStatusCanceled), + string(MongoMigrationStatusCompleted), + string(MongoMigrationStatusFailed), + string(MongoMigrationStatusInProgress), + string(MongoMigrationStatusNotStarted), + } +} + +func (s *MongoMigrationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoMigrationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func 
parseMongoMigrationStatus(input string) (*MongoMigrationStatus, error) { + vals := map[string]MongoMigrationStatus{ + "canceled": MongoMigrationStatusCanceled, + "completed": MongoMigrationStatusCompleted, + "failed": MongoMigrationStatusFailed, + "inprogress": MongoMigrationStatusInProgress, + "notstarted": MongoMigrationStatusNotStarted, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoMigrationStatus(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateProvisioning ProvisioningState = "Provisioning" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateCanceled), + string(ProvisioningStateFailed), + string(ProvisioningStateProvisioning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUpdating), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "canceled": ProvisioningStateCanceled, + "failed": ProvisioningStateFailed, + "provisioning": ProvisioningStateProvisioning, + "succeeded": ProvisioningStateSucceeded, + "updating": ProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := 
ProvisioningState(input) + return &out, nil +} + +type ResourceType string + +const ( + ResourceTypeMongoToCosmosDbMongo ResourceType = "MongoToCosmosDbMongo" + ResourceTypeSqlDb ResourceType = "SqlDb" + ResourceTypeSqlMi ResourceType = "SqlMi" + ResourceTypeSqlVM ResourceType = "SqlVm" +) + +func PossibleValuesForResourceType() []string { + return []string{ + string(ResourceTypeMongoToCosmosDbMongo), + string(ResourceTypeSqlDb), + string(ResourceTypeSqlMi), + string(ResourceTypeSqlVM), + } +} + +func (s *ResourceType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResourceType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResourceType(input string) (*ResourceType, error) { + vals := map[string]ResourceType{ + "mongotocosmosdbmongo": ResourceTypeMongoToCosmosDbMongo, + "sqldb": ResourceTypeSqlDb, + "sqlmi": ResourceTypeSqlMi, + "sqlvm": ResourceTypeSqlVM, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResourceType(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/id_databaseaccount.go b/resource-manager/datamigration/2025-06-30/databasemigrations/id_databaseaccount.go new file mode 100644 index 00000000000..9323f674b22 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/id_databaseaccount.go @@ -0,0 +1,130 @@ +package databasemigrations + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&DatabaseAccountId{}) +} + +var _ resourceids.ResourceId = &DatabaseAccountId{} + +// DatabaseAccountId is a struct representing the Resource ID for a Database Account +type DatabaseAccountId struct { + SubscriptionId string + ResourceGroupName string + DatabaseAccountName string +} + +// NewDatabaseAccountID returns a new DatabaseAccountId struct +func NewDatabaseAccountID(subscriptionId string, resourceGroupName string, databaseAccountName string) DatabaseAccountId { + return DatabaseAccountId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + DatabaseAccountName: databaseAccountName, + } +} + +// ParseDatabaseAccountID parses 'input' into a DatabaseAccountId +func ParseDatabaseAccountID(input string) (*DatabaseAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&DatabaseAccountId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := DatabaseAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseDatabaseAccountIDInsensitively parses 'input' case-insensitively into a DatabaseAccountId +// note: this method should only be used for API response data and not user input +func ParseDatabaseAccountIDInsensitively(input string) (*DatabaseAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&DatabaseAccountId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := DatabaseAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *DatabaseAccountId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if 
id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.DatabaseAccountName, ok = input.Parsed["databaseAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "databaseAccountName", input) + } + + return nil +} + +// ValidateDatabaseAccountID checks that 'input' can be parsed as a Database Account ID +func ValidateDatabaseAccountID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseDatabaseAccountID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Database Account ID +func (id DatabaseAccountId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DocumentDB/databaseAccounts/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.DatabaseAccountName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Database Account ID +func (id DatabaseAccountId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDocumentDB", "Microsoft.DocumentDB", "Microsoft.DocumentDB"), + resourceids.StaticSegment("staticDatabaseAccounts", "databaseAccounts", "databaseAccounts"), + resourceids.UserSpecifiedSegment("databaseAccountName", "databaseAccountName"), + } +} + +// String returns a 
human-readable description of this Database Account ID +func (id DatabaseAccountId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Database Account Name: %q", id.DatabaseAccountName), + } + return fmt.Sprintf("Database Account (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/id_databaseaccount_test.go b/resource-manager/datamigration/2025-06-30/databasemigrations/id_databaseaccount_test.go new file mode 100644 index 00000000000..4b08d7a7162 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/id_databaseaccount_test.go @@ -0,0 +1,282 @@ +package databasemigrations + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &DatabaseAccountId{} + +func TestNewDatabaseAccountID(t *testing.T) { + id := NewDatabaseAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "databaseAccountName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.DatabaseAccountName != "databaseAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'DatabaseAccountName'", id.DatabaseAccountName, "databaseAccountName") + } +} + +func TestFormatDatabaseAccountID(t *testing.T) { + actual := NewDatabaseAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "databaseAccountName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseDatabaseAccountID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *DatabaseAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName", + Expected: &DatabaseAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + DatabaseAccountName: "databaseAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseDatabaseAccountID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.DatabaseAccountName != v.Expected.DatabaseAccountName { + t.Fatalf("Expected %q but got %q for DatabaseAccountName", v.Expected.DatabaseAccountName, actual.DatabaseAccountName) + } + + } +} + +func 
TestParseDatabaseAccountIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *DatabaseAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB", + Error: true, + }, + 
{ + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/dAtAbAsEaCcOuNtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName", + Expected: &DatabaseAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + DatabaseAccountName: "databaseAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/dAtAbAsEaCcOuNtS/dAtAbAsEaCcOuNtNaMe", + Expected: &DatabaseAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + DatabaseAccountName: "dAtAbAsEaCcOuNtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/dAtAbAsEaCcOuNtS/dAtAbAsEaCcOuNtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseDatabaseAccountIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) 
+ } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.DatabaseAccountName != v.Expected.DatabaseAccountName { + t.Fatalf("Expected %q but got %q for DatabaseAccountName", v.Expected.DatabaseAccountName, actual.DatabaseAccountName) + } + + } +} + +func TestSegmentsForDatabaseAccountId(t *testing.T) { + segments := DatabaseAccountId{}.Segments() + if len(segments) == 0 { + t.Fatalf("DatabaseAccountId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/id_databaseaccountproviders2databasemigration.go b/resource-manager/datamigration/2025-06-30/databasemigrations/id_databaseaccountproviders2databasemigration.go new file mode 100644 index 00000000000..12d120cd323 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/id_databaseaccountproviders2databasemigration.go @@ -0,0 +1,141 @@ +package databasemigrations + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+
+func init() {
+	recaser.RegisterResourceId(&DatabaseAccountProviders2DatabaseMigrationId{})
+}
+
+var _ resourceids.ResourceId = &DatabaseAccountProviders2DatabaseMigrationId{}
+
+// DatabaseAccountProviders2DatabaseMigrationId is a struct representing the Resource ID for a Database Account Providers 2 Database Migration
+type DatabaseAccountProviders2DatabaseMigrationId struct {
+	SubscriptionId        string
+	ResourceGroupName     string
+	DatabaseAccountName   string
+	DatabaseMigrationName string
+}
+
+// NewDatabaseAccountProviders2DatabaseMigrationID returns a new DatabaseAccountProviders2DatabaseMigrationId struct
+func NewDatabaseAccountProviders2DatabaseMigrationID(subscriptionId string, resourceGroupName string, databaseAccountName string, databaseMigrationName string) DatabaseAccountProviders2DatabaseMigrationId {
+	return DatabaseAccountProviders2DatabaseMigrationId{
+		SubscriptionId:        subscriptionId,
+		ResourceGroupName:     resourceGroupName,
+		DatabaseAccountName:   databaseAccountName,
+		DatabaseMigrationName: databaseMigrationName,
+	}
+}
+
+// ParseDatabaseAccountProviders2DatabaseMigrationID parses 'input' into a DatabaseAccountProviders2DatabaseMigrationId
+func ParseDatabaseAccountProviders2DatabaseMigrationID(input string) (*DatabaseAccountProviders2DatabaseMigrationId, error) {
+	parser := resourceids.NewParserFromResourceIdType(&DatabaseAccountProviders2DatabaseMigrationId{})
+	parsed, err := parser.Parse(input, false)
+	if err != nil {
+		return nil, fmt.Errorf("parsing %q: %+v", input, err)
+	}
+
+	id := DatabaseAccountProviders2DatabaseMigrationId{}
+	if err = id.FromParseResult(*parsed); err != nil {
+		return nil, err
+	}
+
+	return &id, nil
+}
+
+// ParseDatabaseAccountProviders2DatabaseMigrationIDInsensitively parses 'input' case-insensitively into a DatabaseAccountProviders2DatabaseMigrationId
+// note: this method should only be used for API response data and not user input
+func ParseDatabaseAccountProviders2DatabaseMigrationIDInsensitively(input string) (*DatabaseAccountProviders2DatabaseMigrationId, error) {
+	parser := resourceids.NewParserFromResourceIdType(&DatabaseAccountProviders2DatabaseMigrationId{})
+	parsed, err := parser.Parse(input, true)
+	if err != nil {
+		return nil, fmt.Errorf("parsing %q: %+v", input, err)
+	}
+
+	id := DatabaseAccountProviders2DatabaseMigrationId{}
+	if err = id.FromParseResult(*parsed); err != nil {
+		return nil, err
+	}
+
+	return &id, nil
+}
+
+func (id *DatabaseAccountProviders2DatabaseMigrationId) FromParseResult(input resourceids.ParseResult) error {
+	var ok bool
+
+	if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok {
+		return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input)
+	}
+
+	if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok {
+		return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input)
+	}
+
+	if id.DatabaseAccountName, ok = input.Parsed["databaseAccountName"]; !ok {
+		return resourceids.NewSegmentNotSpecifiedError(id, "databaseAccountName", input)
+	}
+
+	if id.DatabaseMigrationName, ok = input.Parsed["databaseMigrationName"]; !ok {
+		return resourceids.NewSegmentNotSpecifiedError(id, "databaseMigrationName", input)
+	}
+
+	return nil
+}
+
+// ValidateDatabaseAccountProviders2DatabaseMigrationID checks that 'input' can be parsed as a Database Account Providers 2 Database Migration ID
+func ValidateDatabaseAccountProviders2DatabaseMigrationID(input interface{}, key string) (warnings []string, errors []error) {
+	v, ok := input.(string)
+	if !ok {
+		errors = append(errors, fmt.Errorf("expected %q to be a string", key))
+		return
+	}
+
+	if _, err := ParseDatabaseAccountProviders2DatabaseMigrationID(v); err != nil {
+		errors = append(errors, err)
+	}
+
+	return
+}
+
+// ID returns the formatted Database Account Providers 2 Database Migration ID
+func (id DatabaseAccountProviders2DatabaseMigrationId) ID() string {
+	fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DocumentDB/databaseAccounts/%s/providers/Microsoft.DataMigration/databaseMigrations/%s"
+	return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.DatabaseAccountName, id.DatabaseMigrationName)
+}
+
+// Segments returns a slice of Resource ID Segments which comprise this Database Account Providers 2 Database Migration ID
+func (id DatabaseAccountProviders2DatabaseMigrationId) Segments() []resourceids.Segment {
+	return []resourceids.Segment{
+		resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"),
+		resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"),
+		resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"),
+		resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"),
+		resourceids.StaticSegment("staticProviders", "providers", "providers"),
+		resourceids.ResourceProviderSegment("staticMicrosoftDocumentDB", "Microsoft.DocumentDB", "Microsoft.DocumentDB"),
+		resourceids.StaticSegment("staticDatabaseAccounts", "databaseAccounts", "databaseAccounts"),
+		resourceids.UserSpecifiedSegment("databaseAccountName", "databaseAccountName"),
+		resourceids.StaticSegment("staticProviders2", "providers", "providers"),
+		resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"),
+		resourceids.StaticSegment("staticDatabaseMigrations", "databaseMigrations", "databaseMigrations"),
+		resourceids.UserSpecifiedSegment("databaseMigrationName", "databaseMigrationName"),
+	}
+}
+
+// String returns a human-readable description of this Database Account Providers 2 Database Migration ID
+func (id DatabaseAccountProviders2DatabaseMigrationId) String() string {
+	components := []string{
+		fmt.Sprintf("Subscription: %q", id.SubscriptionId),
+		fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName),
+		fmt.Sprintf("Database Account Name: %q", id.DatabaseAccountName),
+		fmt.Sprintf("Database Migration Name: %q", id.DatabaseMigrationName),
+	}
+	return fmt.Sprintf("Database Account Providers 2 Database Migration (%s)", strings.Join(components, "\n"))
+}
diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/id_databaseaccountproviders2databasemigration_test.go b/resource-manager/datamigration/2025-06-30/databasemigrations/id_databaseaccountproviders2databasemigration_test.go
new file mode 100644
index 00000000000..351b5d968c0
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/databasemigrations/id_databaseaccountproviders2databasemigration_test.go
@@ -0,0 +1,358 @@
+package databasemigrations
+
+import (
+	"testing"
+
+	"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ resourceids.ResourceId = &DatabaseAccountProviders2DatabaseMigrationId{}
+
+func TestNewDatabaseAccountProviders2DatabaseMigrationID(t *testing.T) {
+	id := NewDatabaseAccountProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "databaseAccountName", "databaseMigrationName")
+
+	if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" {
+		t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012")
+	}
+
+	if id.ResourceGroupName != "example-resource-group" {
+		t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group")
+	}
+
+	if id.DatabaseAccountName != "databaseAccountName" {
+		t.Fatalf("Expected %q but got %q for Segment 'DatabaseAccountName'", id.DatabaseAccountName, "databaseAccountName")
+	}
+
+	if id.DatabaseMigrationName != "databaseMigrationName" {
+		t.Fatalf("Expected %q but got %q for Segment 'DatabaseMigrationName'", id.DatabaseMigrationName, "databaseMigrationName")
+	}
+}
+
+func TestFormatDatabaseAccountProviders2DatabaseMigrationID(t *testing.T) {
+	actual := NewDatabaseAccountProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "databaseAccountName", "databaseMigrationName").ID()
+	expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName"
+	if actual != expected {
+		t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual)
+	}
+}
+
+func TestParseDatabaseAccountProviders2DatabaseMigrationID(t *testing.T) {
+	testData := []struct {
+		Input    string
+		Error    bool
+		Expected *DatabaseAccountProviders2DatabaseMigrationId
+	}{
+		{
+			// Incomplete URI
+			Input: "",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName/providers",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName/providers/Microsoft.DataMigration",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName/providers/Microsoft.DataMigration/databaseMigrations",
+			Error: true,
+		},
+		{
+			// Valid URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName",
+			Expected: &DatabaseAccountProviders2DatabaseMigrationId{
+				SubscriptionId:        "12345678-1234-9876-4563-123456789012",
+				ResourceGroupName:     "example-resource-group",
+				DatabaseAccountName:   "databaseAccountName",
+				DatabaseMigrationName: "databaseMigrationName",
+			},
+		},
+		{
+			// Invalid (Valid Uri with Extra segment)
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName/extra",
+			Error: true,
+		},
+	}
+	for _, v := range testData {
+		t.Logf("[DEBUG] Testing %q", v.Input)
+
+		actual, err := ParseDatabaseAccountProviders2DatabaseMigrationID(v.Input)
+		if err != nil {
+			if v.Error {
+				continue
+			}
+
+			t.Fatalf("Expect a value but got an error: %+v", err)
+		}
+		if v.Error {
+			t.Fatal("Expect an error but didn't get one")
+		}
+
+		if actual.SubscriptionId != v.Expected.SubscriptionId {
+			t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId)
+		}
+
+		if actual.ResourceGroupName != v.Expected.ResourceGroupName {
+			t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName)
+		}
+
+		if actual.DatabaseAccountName != v.Expected.DatabaseAccountName {
+			t.Fatalf("Expected %q but got %q for DatabaseAccountName", v.Expected.DatabaseAccountName, actual.DatabaseAccountName)
+		}
+
+		if actual.DatabaseMigrationName != v.Expected.DatabaseMigrationName {
+			t.Fatalf("Expected %q but got %q for DatabaseMigrationName", v.Expected.DatabaseMigrationName, actual.DatabaseMigrationName)
+		}
+
+	}
+}
+
+func TestParseDatabaseAccountProviders2DatabaseMigrationIDInsensitively(t *testing.T) {
+	testData := []struct {
+		Input    string
+		Error    bool
+		Expected *DatabaseAccountProviders2DatabaseMigrationId
+	}{
+		{
+			// Incomplete URI
+			Input: "",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions",
+			Error: true,
+		},
+		{
+			// Incomplete URI (mIxEd CaSe since this is insensitive)
+			Input: "/sUbScRiPtIoNs",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012",
+			Error: true,
+		},
+		{
+			// Incomplete URI (mIxEd CaSe since this is insensitive)
+			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups",
+			Error: true,
+		},
+		{
+			// Incomplete URI (mIxEd CaSe since this is insensitive)
+			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group",
+			Error: true,
+		},
+		{
+			// Incomplete URI (mIxEd CaSe since this is insensitive)
+			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers",
+			Error: true,
+		},
+		{
+			// Incomplete URI (mIxEd CaSe since this is insensitive)
+			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB",
+			Error: true,
+		},
+		{
+			// Incomplete URI (mIxEd CaSe since this is insensitive)
+			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts",
+			Error: true,
+		},
+		{
+			// Incomplete URI (mIxEd CaSe since this is insensitive)
+			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/dAtAbAsEaCcOuNtS",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName",
+			Error: true,
+		},
+		{
+			// Incomplete URI (mIxEd CaSe since this is insensitive)
+			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/dAtAbAsEaCcOuNtS/dAtAbAsEaCcOuNtNaMe",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName/providers",
+			Error: true,
+		},
+		{
+			// Incomplete URI (mIxEd CaSe since this is insensitive)
+			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/dAtAbAsEaCcOuNtS/dAtAbAsEaCcOuNtNaMe/pRoViDeRs",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName/providers/Microsoft.DataMigration",
+			Error: true,
+		},
+		{
+			// Incomplete URI (mIxEd CaSe since this is insensitive)
+			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/dAtAbAsEaCcOuNtS/dAtAbAsEaCcOuNtNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName/providers/Microsoft.DataMigration/databaseMigrations",
+			Error: true,
+		},
+		{
+			// Incomplete URI (mIxEd CaSe since this is insensitive)
+			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/dAtAbAsEaCcOuNtS/dAtAbAsEaCcOuNtNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS",
+			Error: true,
+		},
+		{
+			// Valid URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName",
+			Expected: &DatabaseAccountProviders2DatabaseMigrationId{
+				SubscriptionId:        "12345678-1234-9876-4563-123456789012",
+				ResourceGroupName:     "example-resource-group",
+				DatabaseAccountName:   "databaseAccountName",
+				DatabaseMigrationName: "databaseMigrationName",
+			},
+		},
+		{
+			// Invalid (Valid Uri with Extra segment)
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/databaseAccounts/databaseAccountName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName/extra",
+			Error: true,
+		},
+		{
+			// Valid URI (mIxEd CaSe since this is insensitive)
+			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/dAtAbAsEaCcOuNtS/dAtAbAsEaCcOuNtNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS/dAtAbAsEmIgRaTiOnNaMe",
+			Expected: &DatabaseAccountProviders2DatabaseMigrationId{
+				SubscriptionId:        "12345678-1234-9876-4563-123456789012",
+				ResourceGroupName:     "eXaMpLe-rEsOuRcE-GrOuP",
+				DatabaseAccountName:   "dAtAbAsEaCcOuNtNaMe",
+				DatabaseMigrationName: "dAtAbAsEmIgRaTiOnNaMe",
+			},
+		},
+		{
+			// Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive)
+			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/dAtAbAsEaCcOuNtS/dAtAbAsEaCcOuNtNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS/dAtAbAsEmIgRaTiOnNaMe/extra",
+			Error: true,
+		},
+	}
+	for _, v := range testData {
+		t.Logf("[DEBUG] Testing %q", v.Input)
+
+		actual, err := ParseDatabaseAccountProviders2DatabaseMigrationIDInsensitively(v.Input)
+		if err != nil {
+			if v.Error {
+				continue
+			}
+
+			t.Fatalf("Expect a value but got an error: %+v", err)
+		}
+		if v.Error {
+			t.Fatal("Expect an error but didn't get one")
+		}
+
+		if actual.SubscriptionId != v.Expected.SubscriptionId {
+			t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId)
+		}
+
+		if actual.ResourceGroupName != v.Expected.ResourceGroupName {
+			t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName)
+		}
+
+		if actual.DatabaseAccountName != v.Expected.DatabaseAccountName {
+			t.Fatalf("Expected %q but got %q for DatabaseAccountName", v.Expected.DatabaseAccountName, actual.DatabaseAccountName)
+		}
+
+		if actual.DatabaseMigrationName != v.Expected.DatabaseMigrationName {
+			t.Fatalf("Expected %q but got %q for DatabaseMigrationName", v.Expected.DatabaseMigrationName, actual.DatabaseMigrationName)
+		}
+
+	}
+}
+
+func TestSegmentsForDatabaseAccountProviders2DatabaseMigrationId(t *testing.T) {
+	segments := DatabaseAccountProviders2DatabaseMigrationId{}.Segments()
+	if len(segments) == 0 {
+		t.Fatalf("DatabaseAccountProviders2DatabaseMigrationId has no segments")
+	}
+
+	uniqueNames := make(map[string]struct{}, len(segments))
+	for _, segment := range segments {
+		uniqueNames[segment.Name] = struct{}{}
+	}
+	if len(uniqueNames) != len(segments) {
+		// %d (not %q) for both counts: %q applied to an int prints a quoted rune, not the number.
+		t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments))
+	}
+}
diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/id_databasemigration.go b/resource-manager/datamigration/2025-06-30/databasemigrations/id_databasemigration.go
new file mode 100644
index 00000000000..84bc0dc65f8
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/databasemigrations/id_databasemigration.go
@@ -0,0 +1,141 @@
+package databasemigrations
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/go-azure-helpers/resourcemanager/recaser"
+	"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// init registers this Resource ID type with the recaser package so that
+// insensitively-cased IDs returned by the API can be re-cased to this canonical form.
+func init() {
+	recaser.RegisterResourceId(&DatabaseMigrationId{})
+}
+
+var _ resourceids.ResourceId = &DatabaseMigrationId{}
+
+// DatabaseMigrationId is a struct representing the Resource ID for a Database Migration
+type DatabaseMigrationId struct {
+	SubscriptionId        string
+	ResourceGroupName     string
+	ServerName            string
+	DatabaseMigrationName string
+}
+
+// NewDatabaseMigrationID returns a new DatabaseMigrationId struct
+func NewDatabaseMigrationID(subscriptionId string, resourceGroupName string, serverName string, databaseMigrationName string) DatabaseMigrationId {
+	return DatabaseMigrationId{
+		SubscriptionId:        subscriptionId,
+		ResourceGroupName:     resourceGroupName,
+		ServerName:            serverName,
+		DatabaseMigrationName: databaseMigrationName,
+	}
+}
+
+// ParseDatabaseMigrationID parses 'input' into a DatabaseMigrationId
+func ParseDatabaseMigrationID(input string) (*DatabaseMigrationId, error) {
+	parser := resourceids.NewParserFromResourceIdType(&DatabaseMigrationId{})
+	parsed, err := parser.Parse(input, false)
+	if err != nil {
+		return nil, fmt.Errorf("parsing %q: %+v", input, err)
+	}
+
+	id := DatabaseMigrationId{}
+	if err = id.FromParseResult(*parsed); err != nil {
+		return nil, err
+	}
+
+	return &id, nil
+}
+
+// ParseDatabaseMigrationIDInsensitively parses 'input' case-insensitively into a DatabaseMigrationId
+// note: this method should only be used for API response data and not user input
+func ParseDatabaseMigrationIDInsensitively(input string) (*DatabaseMigrationId, error) {
+	parser := resourceids.NewParserFromResourceIdType(&DatabaseMigrationId{})
+	parsed, err := parser.Parse(input, true)
+	if err != nil {
+		return nil, fmt.Errorf("parsing %q: %+v", input, err)
+	}
+
+	id := DatabaseMigrationId{}
+	if err = id.FromParseResult(*parsed); err != nil {
+		return nil, err
+	}
+
+	return &id, nil
+}
+
+// FromParseResult populates the fields of this DatabaseMigrationId from a ParseResult,
+// returning a SegmentNotSpecifiedError for the first expected segment that is missing.
+func (id *DatabaseMigrationId) FromParseResult(input resourceids.ParseResult) error {
+	var ok bool
+
+	if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok {
+		return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input)
+	}
+
+	if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok {
+		return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input)
+	}
+
+	if id.ServerName, ok = input.Parsed["serverName"]; !ok {
+		return resourceids.NewSegmentNotSpecifiedError(id, "serverName", input)
+	}
+
+	if id.DatabaseMigrationName, ok = input.Parsed["databaseMigrationName"]; !ok {
+		return resourceids.NewSegmentNotSpecifiedError(id, "databaseMigrationName", input)
+	}
+
+	return nil
+}
+
+// ValidateDatabaseMigrationID checks that 'input' can be parsed as a Database Migration ID
+func ValidateDatabaseMigrationID(input interface{}, key string) (warnings []string, errors []error) {
+	v, ok := input.(string)
+	if !ok {
+		errors = append(errors, fmt.Errorf("expected %q to be a string", key))
+		return
+	}
+
+	if _, err := ParseDatabaseMigrationID(v); err != nil {
+		errors = append(errors, err)
+	}
+
+	return
+}
+
+// ID returns the formatted Database Migration ID
+func (id DatabaseMigrationId) ID() string {
+	fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Sql/servers/%s/providers/Microsoft.DataMigration/databaseMigrations/%s"
+	return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServerName, id.DatabaseMigrationName)
+}
+
+// Segments returns a slice of Resource ID Segments which comprise this Database Migration ID
+func (id DatabaseMigrationId) Segments() []resourceids.Segment {
+	return []resourceids.Segment{
+		resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"),
+		resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"),
+		resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"),
+		resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"),
+		resourceids.StaticSegment("staticProviders", "providers", "providers"),
+		resourceids.ResourceProviderSegment("staticMicrosoftSql", "Microsoft.Sql", "Microsoft.Sql"),
+		resourceids.StaticSegment("staticServers", "servers", "servers"),
+		resourceids.UserSpecifiedSegment("serverName", "serverName"),
+		resourceids.StaticSegment("staticProviders2", "providers", "providers"),
+		resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"),
+		resourceids.StaticSegment("staticDatabaseMigrations", "databaseMigrations", "databaseMigrations"),
+		resourceids.UserSpecifiedSegment("databaseMigrationName", "databaseMigrationName"),
+	}
+}
+
+// String returns a human-readable description of this Database Migration ID
+func (id DatabaseMigrationId) String() string {
+	components := []string{
+		fmt.Sprintf("Subscription: %q", id.SubscriptionId),
+		fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName),
+		fmt.Sprintf("Server Name: %q", id.ServerName),
+		fmt.Sprintf("Database Migration Name: %q", id.DatabaseMigrationName),
+	}
+	return fmt.Sprintf("Database Migration (%s)", strings.Join(components, "\n"))
+}
diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/id_databasemigration_test.go b/resource-manager/datamigration/2025-06-30/databasemigrations/id_databasemigration_test.go
new file mode 100644
index 00000000000..dfc641bb16e
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/databasemigrations/id_databasemigration_test.go
@@ -0,0 +1,357 @@
+package databasemigrations
+
+import (
+	"testing"
+
+	"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +var _ resourceids.ResourceId = &DatabaseMigrationId{} + +func TestNewDatabaseMigrationID(t *testing.T) { + id := NewDatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "serverName", "databaseMigrationName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.ServerName != "serverName" { + t.Fatalf("Expected %q but got %q for Segment 'ServerName'", id.ServerName, "serverName") + } + + if id.DatabaseMigrationName != "databaseMigrationName" { + t.Fatalf("Expected %q but got %q for Segment 'DatabaseMigrationName'", id.DatabaseMigrationName, "databaseMigrationName") + } +} + +func TestFormatDatabaseMigrationID(t *testing.T) { + actual := NewDatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "serverName", "databaseMigrationName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers/serverName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseDatabaseMigrationID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *DatabaseMigrationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // 
Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers/serverName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers/serverName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers/serverName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers/serverName/providers/Microsoft.DataMigration/databaseMigrations", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers/serverName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName", + Expected: &DatabaseMigrationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ServerName: "serverName", + DatabaseMigrationName: "databaseMigrationName", + }, + }, + { + // Invalid (Valid Uri with Extra 
segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers/serverName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseDatabaseMigrationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServerName != v.Expected.ServerName { + t.Fatalf("Expected %q but got %q for ServerName", v.Expected.ServerName, actual.ServerName) + } + + if actual.DatabaseMigrationName != v.Expected.DatabaseMigrationName { + t.Fatalf("Expected %q but got %q for DatabaseMigrationName", v.Expected.DatabaseMigrationName, actual.DatabaseMigrationName) + } + + } +} + +func TestParseDatabaseMigrationIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *DatabaseMigrationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/sErVeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers/serverName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/sErVeRs/sErVeRnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers/serverName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/sErVeRs/sErVeRnAmE/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers/serverName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/sErVeRs/sErVeRnAmE/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers/serverName/providers/Microsoft.DataMigration/databaseMigrations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/sErVeRs/sErVeRnAmE/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers/serverName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName", + Expected: &DatabaseMigrationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ServerName: 
"serverName", + DatabaseMigrationName: "databaseMigrationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/servers/serverName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/sErVeRs/sErVeRnAmE/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS/dAtAbAsEmIgRaTiOnNaMe", + Expected: &DatabaseMigrationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + ServerName: "sErVeRnAmE", + DatabaseMigrationName: "dAtAbAsEmIgRaTiOnNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/sErVeRs/sErVeRnAmE/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS/dAtAbAsEmIgRaTiOnNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseDatabaseMigrationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServerName != v.Expected.ServerName { + t.Fatalf("Expected %q but got %q for ServerName", 
v.Expected.ServerName, actual.ServerName) + } + + if actual.DatabaseMigrationName != v.Expected.DatabaseMigrationName { + t.Fatalf("Expected %q but got %q for DatabaseMigrationName", v.Expected.DatabaseMigrationName, actual.DatabaseMigrationName) + } + + } +} + +func TestSegmentsForDatabaseMigrationId(t *testing.T) { + segments := DatabaseMigrationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("DatabaseMigrationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/id_mongocluster.go b/resource-manager/datamigration/2025-06-30/databasemigrations/id_mongocluster.go new file mode 100644 index 00000000000..be4b2741244 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/id_mongocluster.go @@ -0,0 +1,130 @@ +package databasemigrations + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +func init() { + recaser.RegisterResourceId(&MongoClusterId{}) +} + +var _ resourceids.ResourceId = &MongoClusterId{} + +// MongoClusterId is a struct representing the Resource ID for a Mongo Cluster +type MongoClusterId struct { + SubscriptionId string + ResourceGroupName string + MongoClusterName string +} + +// NewMongoClusterID returns a new MongoClusterId struct +func NewMongoClusterID(subscriptionId string, resourceGroupName string, mongoClusterName string) MongoClusterId { + return MongoClusterId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + MongoClusterName: mongoClusterName, + } +} + +// ParseMongoClusterID parses 'input' into a MongoClusterId +func ParseMongoClusterID(input string) (*MongoClusterId, error) { + parser := resourceids.NewParserFromResourceIdType(&MongoClusterId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := MongoClusterId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseMongoClusterIDInsensitively parses 'input' case-insensitively into a MongoClusterId +// note: this method should only be used for API response data and not user input +func ParseMongoClusterIDInsensitively(input string) (*MongoClusterId, error) { + parser := resourceids.NewParserFromResourceIdType(&MongoClusterId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := MongoClusterId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *MongoClusterId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.MongoClusterName, ok = input.Parsed["mongoClusterName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "mongoClusterName", input) + } + + return nil +} + +// ValidateMongoClusterID checks that 'input' can be parsed as a Mongo Cluster ID +func ValidateMongoClusterID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseMongoClusterID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Mongo Cluster ID +func (id MongoClusterId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DocumentDB/mongoClusters/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.MongoClusterName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Mongo Cluster ID +func (id MongoClusterId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDocumentDB", "Microsoft.DocumentDB", "Microsoft.DocumentDB"), + resourceids.StaticSegment("staticMongoClusters", "mongoClusters", "mongoClusters"), + resourceids.UserSpecifiedSegment("mongoClusterName", "mongoClusterName"), + } +} + +// String returns a human-readable description of this Mongo Cluster ID +func (id MongoClusterId) String() string { + components := []string{ + 
fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Mongo Cluster Name: %q", id.MongoClusterName), + } + return fmt.Sprintf("Mongo Cluster (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/id_mongocluster_test.go b/resource-manager/datamigration/2025-06-30/databasemigrations/id_mongocluster_test.go new file mode 100644 index 00000000000..b254d6831ea --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/id_mongocluster_test.go @@ -0,0 +1,282 @@ +package databasemigrations + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &MongoClusterId{} + +func TestNewMongoClusterID(t *testing.T) { + id := NewMongoClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "mongoClusterName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.MongoClusterName != "mongoClusterName" { + t.Fatalf("Expected %q but got %q for Segment 'MongoClusterName'", id.MongoClusterName, "mongoClusterName") + } +} + +func TestFormatMongoClusterID(t *testing.T) { + actual := NewMongoClusterID("12345678-1234-9876-4563-123456789012", "example-resource-group", "mongoClusterName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName" 
+ if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseMongoClusterID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *MongoClusterId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName", + Expected: &MongoClusterId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + MongoClusterName: "mongoClusterName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := 
ParseMongoClusterID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.MongoClusterName != v.Expected.MongoClusterName { + t.Fatalf("Expected %q but got %q for MongoClusterName", v.Expected.MongoClusterName, actual.MongoClusterName) + } + + } +} + +func TestParseMongoClusterIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *MongoClusterId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, 
+ }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/mOnGoClUsTeRs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName", + Expected: &MongoClusterId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + MongoClusterName: "mongoClusterName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/mOnGoClUsTeRs/mOnGoClUsTeRnAmE", + 
Expected: &MongoClusterId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + MongoClusterName: "mOnGoClUsTeRnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/mOnGoClUsTeRs/mOnGoClUsTeRnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseMongoClusterIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.MongoClusterName != v.Expected.MongoClusterName { + t.Fatalf("Expected %q but got %q for MongoClusterName", v.Expected.MongoClusterName, actual.MongoClusterName) + } + + } +} + +func TestSegmentsForMongoClusterId(t *testing.T) { + segments := MongoClusterId{}.Segments() + if len(segments) == 0 { + t.Fatalf("MongoClusterId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/id_mongoclusterproviders2databasemigration.go
b/resource-manager/datamigration/2025-06-30/databasemigrations/id_mongoclusterproviders2databasemigration.go new file mode 100644 index 00000000000..15663b4ca2b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/id_mongoclusterproviders2databasemigration.go @@ -0,0 +1,141 @@ +package databasemigrations + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&MongoClusterProviders2DatabaseMigrationId{}) +} + +var _ resourceids.ResourceId = &MongoClusterProviders2DatabaseMigrationId{} + +// MongoClusterProviders2DatabaseMigrationId is a struct representing the Resource ID for a Mongo Cluster Providers 2 Database Migration +type MongoClusterProviders2DatabaseMigrationId struct { + SubscriptionId string + ResourceGroupName string + MongoClusterName string + DatabaseMigrationName string +} + +// NewMongoClusterProviders2DatabaseMigrationID returns a new MongoClusterProviders2DatabaseMigrationId struct +func NewMongoClusterProviders2DatabaseMigrationID(subscriptionId string, resourceGroupName string, mongoClusterName string, databaseMigrationName string) MongoClusterProviders2DatabaseMigrationId { + return MongoClusterProviders2DatabaseMigrationId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + MongoClusterName: mongoClusterName, + DatabaseMigrationName: databaseMigrationName, + } +} + +// ParseMongoClusterProviders2DatabaseMigrationID parses 'input' into a MongoClusterProviders2DatabaseMigrationId +func ParseMongoClusterProviders2DatabaseMigrationID(input string) (*MongoClusterProviders2DatabaseMigrationId, error) { + parser := 
resourceids.NewParserFromResourceIdType(&MongoClusterProviders2DatabaseMigrationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := MongoClusterProviders2DatabaseMigrationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseMongoClusterProviders2DatabaseMigrationIDInsensitively parses 'input' case-insensitively into a MongoClusterProviders2DatabaseMigrationId +// note: this method should only be used for API response data and not user input +func ParseMongoClusterProviders2DatabaseMigrationIDInsensitively(input string) (*MongoClusterProviders2DatabaseMigrationId, error) { + parser := resourceids.NewParserFromResourceIdType(&MongoClusterProviders2DatabaseMigrationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := MongoClusterProviders2DatabaseMigrationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *MongoClusterProviders2DatabaseMigrationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.MongoClusterName, ok = input.Parsed["mongoClusterName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "mongoClusterName", input) + } + + if id.DatabaseMigrationName, ok = input.Parsed["databaseMigrationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "databaseMigrationName", input) + } + + return nil +} + +// ValidateMongoClusterProviders2DatabaseMigrationID checks that 'input' can be parsed as a Mongo Cluster Providers 
2 Database Migration ID +func ValidateMongoClusterProviders2DatabaseMigrationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseMongoClusterProviders2DatabaseMigrationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Mongo Cluster Providers 2 Database Migration ID +func (id MongoClusterProviders2DatabaseMigrationId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DocumentDB/mongoClusters/%s/providers/Microsoft.DataMigration/databaseMigrations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.MongoClusterName, id.DatabaseMigrationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Mongo Cluster Providers 2 Database Migration ID +func (id MongoClusterProviders2DatabaseMigrationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDocumentDB", "Microsoft.DocumentDB", "Microsoft.DocumentDB"), + resourceids.StaticSegment("staticMongoClusters", "mongoClusters", "mongoClusters"), + resourceids.UserSpecifiedSegment("mongoClusterName", "mongoClusterName"), + resourceids.StaticSegment("staticProviders2", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + 
resourceids.StaticSegment("staticDatabaseMigrations", "databaseMigrations", "databaseMigrations"), + resourceids.UserSpecifiedSegment("databaseMigrationName", "databaseMigrationName"), + } +} + +// String returns a human-readable description of this Mongo Cluster Providers 2 Database Migration ID +func (id MongoClusterProviders2DatabaseMigrationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Mongo Cluster Name: %q", id.MongoClusterName), + fmt.Sprintf("Database Migration Name: %q", id.DatabaseMigrationName), + } + return fmt.Sprintf("Mongo Cluster Providers 2 Database Migration (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/id_mongoclusterproviders2databasemigration_test.go b/resource-manager/datamigration/2025-06-30/databasemigrations/id_mongoclusterproviders2databasemigration_test.go new file mode 100644 index 00000000000..5503e3ed72b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/id_mongoclusterproviders2databasemigration_test.go @@ -0,0 +1,357 @@ +package databasemigrations + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &MongoClusterProviders2DatabaseMigrationId{} + +func TestNewMongoClusterProviders2DatabaseMigrationID(t *testing.T) { + id := NewMongoClusterProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "mongoClusterName", "databaseMigrationName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.MongoClusterName != "mongoClusterName" { + t.Fatalf("Expected %q but got %q for Segment 'MongoClusterName'", id.MongoClusterName, "mongoClusterName") + } + + if id.DatabaseMigrationName != "databaseMigrationName" { + t.Fatalf("Expected %q but got %q for Segment 'DatabaseMigrationName'", id.DatabaseMigrationName, "databaseMigrationName") + } +} + +func TestFormatMongoClusterProviders2DatabaseMigrationID(t *testing.T) { + actual := NewMongoClusterProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "mongoClusterName", "databaseMigrationName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseMongoClusterProviders2DatabaseMigrationID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *MongoClusterProviders2DatabaseMigrationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName/providers/Microsoft.DataMigration/databaseMigrations", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName", + Expected: &MongoClusterProviders2DatabaseMigrationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + MongoClusterName: "mongoClusterName", + DatabaseMigrationName: "databaseMigrationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseMongoClusterProviders2DatabaseMigrationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.MongoClusterName != v.Expected.MongoClusterName { + t.Fatalf("Expected %q but got %q for MongoClusterName", v.Expected.MongoClusterName, actual.MongoClusterName) + } + + if actual.DatabaseMigrationName != v.Expected.DatabaseMigrationName { + t.Fatalf("Expected %q but got %q for DatabaseMigrationName", v.Expected.DatabaseMigrationName, actual.DatabaseMigrationName) + } + + } +} + +func TestParseMongoClusterProviders2DatabaseMigrationIDInsensitively(t *testing.T) { + testData := []struct { + 
Input string + Error bool + Expected *MongoClusterProviders2DatabaseMigrationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/mOnGoClUsTeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/mOnGoClUsTeRs/mOnGoClUsTeRnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/mOnGoClUsTeRs/mOnGoClUsTeRnAmE/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/mOnGoClUsTeRs/mOnGoClUsTeRnAmE/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName/providers/Microsoft.DataMigration/databaseMigrations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/mOnGoClUsTeRs/mOnGoClUsTeRnAmE/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName", + Expected: &MongoClusterProviders2DatabaseMigrationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + MongoClusterName: "mongoClusterName", + DatabaseMigrationName: "databaseMigrationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DocumentDB/mongoClusters/mongoClusterName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/mOnGoClUsTeRs/mOnGoClUsTeRnAmE/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS/dAtAbAsEmIgRaTiOnNaMe", + Expected: &MongoClusterProviders2DatabaseMigrationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + MongoClusterName: "mOnGoClUsTeRnAmE", + DatabaseMigrationName: "dAtAbAsEmIgRaTiOnNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is 
insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dOcUmEnTdB/mOnGoClUsTeRs/mOnGoClUsTeRnAmE/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS/dAtAbAsEmIgRaTiOnNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseMongoClusterProviders2DatabaseMigrationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.MongoClusterName != v.Expected.MongoClusterName { + t.Fatalf("Expected %q but got %q for MongoClusterName", v.Expected.MongoClusterName, actual.MongoClusterName) + } + + if actual.DatabaseMigrationName != v.Expected.DatabaseMigrationName { + t.Fatalf("Expected %q but got %q for DatabaseMigrationName", v.Expected.DatabaseMigrationName, actual.DatabaseMigrationName) + } + + } +} + +func TestSegmentsForMongoClusterProviders2DatabaseMigrationId(t *testing.T) { + segments := MongoClusterProviders2DatabaseMigrationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("MongoClusterProviders2DatabaseMigrationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git 
a/resource-manager/datamigration/2025-06-30/databasemigrations/id_providers2databasemigration.go b/resource-manager/datamigration/2025-06-30/databasemigrations/id_providers2databasemigration.go new file mode 100644 index 00000000000..7ad8743c55c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/id_providers2databasemigration.go @@ -0,0 +1,141 @@ +package databasemigrations + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&Providers2DatabaseMigrationId{}) +} + +var _ resourceids.ResourceId = &Providers2DatabaseMigrationId{} + +// Providers2DatabaseMigrationId is a struct representing the Resource ID for a Providers 2 Database Migration +type Providers2DatabaseMigrationId struct { + SubscriptionId string + ResourceGroupName string + ManagedInstanceName string + DatabaseMigrationName string +} + +// NewProviders2DatabaseMigrationID returns a new Providers2DatabaseMigrationId struct +func NewProviders2DatabaseMigrationID(subscriptionId string, resourceGroupName string, managedInstanceName string, databaseMigrationName string) Providers2DatabaseMigrationId { + return Providers2DatabaseMigrationId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ManagedInstanceName: managedInstanceName, + DatabaseMigrationName: databaseMigrationName, + } +} + +// ParseProviders2DatabaseMigrationID parses 'input' into a Providers2DatabaseMigrationId +func ParseProviders2DatabaseMigrationID(input string) (*Providers2DatabaseMigrationId, error) { + parser := resourceids.NewParserFromResourceIdType(&Providers2DatabaseMigrationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, 
fmt.Errorf("parsing %q: %+v", input, err) + } + + id := Providers2DatabaseMigrationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseProviders2DatabaseMigrationIDInsensitively parses 'input' case-insensitively into a Providers2DatabaseMigrationId +// note: this method should only be used for API response data and not user input +func ParseProviders2DatabaseMigrationIDInsensitively(input string) (*Providers2DatabaseMigrationId, error) { + parser := resourceids.NewParserFromResourceIdType(&Providers2DatabaseMigrationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := Providers2DatabaseMigrationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *Providers2DatabaseMigrationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ManagedInstanceName, ok = input.Parsed["managedInstanceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "managedInstanceName", input) + } + + if id.DatabaseMigrationName, ok = input.Parsed["databaseMigrationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "databaseMigrationName", input) + } + + return nil +} + +// ValidateProviders2DatabaseMigrationID checks that 'input' can be parsed as a Providers 2 Database Migration ID +func ValidateProviders2DatabaseMigrationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err 
:= ParseProviders2DatabaseMigrationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Providers 2 Database Migration ID +func (id Providers2DatabaseMigrationId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Sql/managedInstances/%s/providers/Microsoft.DataMigration/databaseMigrations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ManagedInstanceName, id.DatabaseMigrationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Providers 2 Database Migration ID +func (id Providers2DatabaseMigrationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftSql", "Microsoft.Sql", "Microsoft.Sql"), + resourceids.StaticSegment("staticManagedInstances", "managedInstances", "managedInstances"), + resourceids.UserSpecifiedSegment("managedInstanceName", "managedInstanceName"), + resourceids.StaticSegment("staticProviders2", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticDatabaseMigrations", "databaseMigrations", "databaseMigrations"), + resourceids.UserSpecifiedSegment("databaseMigrationName", "databaseMigrationName"), + } +} + +// String returns a human-readable description of this Providers 2 Database Migration ID +func (id Providers2DatabaseMigrationId) String() string { + components := []string{ + 
fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Managed Instance Name: %q", id.ManagedInstanceName), + fmt.Sprintf("Database Migration Name: %q", id.DatabaseMigrationName), + } + return fmt.Sprintf("Providers 2 Database Migration (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/id_providers2databasemigration_test.go b/resource-manager/datamigration/2025-06-30/databasemigrations/id_providers2databasemigration_test.go new file mode 100644 index 00000000000..928124afd26 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/id_providers2databasemigration_test.go @@ -0,0 +1,357 @@ +package databasemigrations + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &Providers2DatabaseMigrationId{} + +func TestNewProviders2DatabaseMigrationID(t *testing.T) { + id := NewProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedInstanceName", "databaseMigrationName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.ManagedInstanceName != "managedInstanceName" { + t.Fatalf("Expected %q but got %q for Segment 'ManagedInstanceName'", id.ManagedInstanceName, "managedInstanceName") + } + + if id.DatabaseMigrationName != "databaseMigrationName" { + t.Fatalf("Expected %q but got %q for Segment 'DatabaseMigrationName'", id.DatabaseMigrationName, "databaseMigrationName") + } +} + +func TestFormatProviders2DatabaseMigrationID(t *testing.T) { + actual := NewProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "managedInstanceName", "databaseMigrationName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances/managedInstanceName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseProviders2DatabaseMigrationID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *Providers2DatabaseMigrationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: 
true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances/managedInstanceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances/managedInstanceName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances/managedInstanceName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances/managedInstanceName/providers/Microsoft.DataMigration/databaseMigrations", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances/managedInstanceName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName", + Expected: 
&Providers2DatabaseMigrationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedInstanceName: "managedInstanceName", + DatabaseMigrationName: "databaseMigrationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances/managedInstanceName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProviders2DatabaseMigrationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedInstanceName != v.Expected.ManagedInstanceName { + t.Fatalf("Expected %q but got %q for ManagedInstanceName", v.Expected.ManagedInstanceName, actual.ManagedInstanceName) + } + + if actual.DatabaseMigrationName != v.Expected.DatabaseMigrationName { + t.Fatalf("Expected %q but got %q for DatabaseMigrationName", v.Expected.DatabaseMigrationName, actual.DatabaseMigrationName) + } + + } +} + +func TestParseProviders2DatabaseMigrationIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *Providers2DatabaseMigrationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is 
insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/mAnAgEdInStAnCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances/managedInstanceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/mAnAgEdInStAnCeS/mAnAgEdInStAnCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances/managedInstanceName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/mAnAgEdInStAnCeS/mAnAgEdInStAnCeNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances/managedInstanceName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/mAnAgEdInStAnCeS/mAnAgEdInStAnCeNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances/managedInstanceName/providers/Microsoft.DataMigration/databaseMigrations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/mAnAgEdInStAnCeS/mAnAgEdInStAnCeNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances/managedInstanceName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName", + Expected: &Providers2DatabaseMigrationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + ManagedInstanceName: "managedInstanceName", + DatabaseMigrationName: "databaseMigrationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.Sql/managedInstances/managedInstanceName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/mAnAgEdInStAnCeS/mAnAgEdInStAnCeNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS/dAtAbAsEmIgRaTiOnNaMe", + Expected: &Providers2DatabaseMigrationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + ManagedInstanceName: "mAnAgEdInStAnCeNaMe", + DatabaseMigrationName: "dAtAbAsEmIgRaTiOnNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQl/mAnAgEdInStAnCeS/mAnAgEdInStAnCeNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS/dAtAbAsEmIgRaTiOnNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + 
t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProviders2DatabaseMigrationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ManagedInstanceName != v.Expected.ManagedInstanceName { + t.Fatalf("Expected %q but got %q for ManagedInstanceName", v.Expected.ManagedInstanceName, actual.ManagedInstanceName) + } + + if actual.DatabaseMigrationName != v.Expected.DatabaseMigrationName { + t.Fatalf("Expected %q but got %q for DatabaseMigrationName", v.Expected.DatabaseMigrationName, actual.DatabaseMigrationName) + } + + } +} + +func TestSegmentsForProviders2DatabaseMigrationId(t *testing.T) { + segments := Providers2DatabaseMigrationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("Providers2DatabaseMigrationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/id_sqlvirtualmachineproviders2databasemigration.go b/resource-manager/datamigration/2025-06-30/databasemigrations/id_sqlvirtualmachineproviders2databasemigration.go new file mode 100644 index 00000000000..c9ec8e56def --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/databasemigrations/id_sqlvirtualmachineproviders2databasemigration.go @@ -0,0 +1,141 @@ +package databasemigrations + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&SqlVirtualMachineProviders2DatabaseMigrationId{}) +} + +var _ resourceids.ResourceId = &SqlVirtualMachineProviders2DatabaseMigrationId{} + +// SqlVirtualMachineProviders2DatabaseMigrationId is a struct representing the Resource ID for a Sql Virtual Machine Providers 2 Database Migration +type SqlVirtualMachineProviders2DatabaseMigrationId struct { + SubscriptionId string + ResourceGroupName string + SqlVirtualMachineName string + DatabaseMigrationName string +} + +// NewSqlVirtualMachineProviders2DatabaseMigrationID returns a new SqlVirtualMachineProviders2DatabaseMigrationId struct +func NewSqlVirtualMachineProviders2DatabaseMigrationID(subscriptionId string, resourceGroupName string, sqlVirtualMachineName string, databaseMigrationName string) SqlVirtualMachineProviders2DatabaseMigrationId { + return SqlVirtualMachineProviders2DatabaseMigrationId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + SqlVirtualMachineName: sqlVirtualMachineName, + DatabaseMigrationName: databaseMigrationName, + } +} + +// ParseSqlVirtualMachineProviders2DatabaseMigrationID parses 'input' into a SqlVirtualMachineProviders2DatabaseMigrationId +func ParseSqlVirtualMachineProviders2DatabaseMigrationID(input string) (*SqlVirtualMachineProviders2DatabaseMigrationId, error) { + parser := resourceids.NewParserFromResourceIdType(&SqlVirtualMachineProviders2DatabaseMigrationId{}) + parsed, err := parser.Parse(input, false) + if err != nil 
{ + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SqlVirtualMachineProviders2DatabaseMigrationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseSqlVirtualMachineProviders2DatabaseMigrationIDInsensitively parses 'input' case-insensitively into a SqlVirtualMachineProviders2DatabaseMigrationId +// note: this method should only be used for API response data and not user input +func ParseSqlVirtualMachineProviders2DatabaseMigrationIDInsensitively(input string) (*SqlVirtualMachineProviders2DatabaseMigrationId, error) { + parser := resourceids.NewParserFromResourceIdType(&SqlVirtualMachineProviders2DatabaseMigrationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SqlVirtualMachineProviders2DatabaseMigrationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *SqlVirtualMachineProviders2DatabaseMigrationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.SqlVirtualMachineName, ok = input.Parsed["sqlVirtualMachineName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "sqlVirtualMachineName", input) + } + + if id.DatabaseMigrationName, ok = input.Parsed["databaseMigrationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "databaseMigrationName", input) + } + + return nil +} + +// ValidateSqlVirtualMachineProviders2DatabaseMigrationID checks that 'input' can be parsed as a Sql Virtual Machine Providers 2 Database Migration ID +func 
ValidateSqlVirtualMachineProviders2DatabaseMigrationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseSqlVirtualMachineProviders2DatabaseMigrationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Sql Virtual Machine Providers 2 Database Migration ID +func (id SqlVirtualMachineProviders2DatabaseMigrationId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/%s/providers/Microsoft.DataMigration/databaseMigrations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.SqlVirtualMachineName, id.DatabaseMigrationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Sql Virtual Machine Providers 2 Database Migration ID +func (id SqlVirtualMachineProviders2DatabaseMigrationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftSqlVirtualMachine", "Microsoft.SqlVirtualMachine", "Microsoft.SqlVirtualMachine"), + resourceids.StaticSegment("staticSqlVirtualMachines", "sqlVirtualMachines", "sqlVirtualMachines"), + resourceids.UserSpecifiedSegment("sqlVirtualMachineName", "sqlVirtualMachineName"), + resourceids.StaticSegment("staticProviders2", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", 
"Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticDatabaseMigrations", "databaseMigrations", "databaseMigrations"), + resourceids.UserSpecifiedSegment("databaseMigrationName", "databaseMigrationName"), + } +} + +// String returns a human-readable description of this Sql Virtual Machine Providers 2 Database Migration ID +func (id SqlVirtualMachineProviders2DatabaseMigrationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Sql Virtual Machine Name: %q", id.SqlVirtualMachineName), + fmt.Sprintf("Database Migration Name: %q", id.DatabaseMigrationName), + } + return fmt.Sprintf("Sql Virtual Machine Providers 2 Database Migration (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/id_sqlvirtualmachineproviders2databasemigration_test.go b/resource-manager/datamigration/2025-06-30/databasemigrations/id_sqlvirtualmachineproviders2databasemigration_test.go new file mode 100644 index 00000000000..74de77fcd5b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/id_sqlvirtualmachineproviders2databasemigration_test.go @@ -0,0 +1,357 @@ +package databasemigrations + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+
+var _ resourceids.ResourceId = &SqlVirtualMachineProviders2DatabaseMigrationId{}
+
+func TestNewSqlVirtualMachineProviders2DatabaseMigrationID(t *testing.T) {
+	id := NewSqlVirtualMachineProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlVirtualMachineName", "databaseMigrationName")
+
+	if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" {
+		t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", "12345678-1234-9876-4563-123456789012", id.SubscriptionId)
+	}
+
+	if id.ResourceGroupName != "example-resource-group" {
+		t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", "example-resource-group", id.ResourceGroupName)
+	}
+
+	if id.SqlVirtualMachineName != "sqlVirtualMachineName" {
+		t.Fatalf("Expected %q but got %q for Segment 'SqlVirtualMachineName'", "sqlVirtualMachineName", id.SqlVirtualMachineName)
+	}
+
+	if id.DatabaseMigrationName != "databaseMigrationName" {
+		t.Fatalf("Expected %q but got %q for Segment 'DatabaseMigrationName'", "databaseMigrationName", id.DatabaseMigrationName)
+	}
+}
+
+func TestFormatSqlVirtualMachineProviders2DatabaseMigrationID(t *testing.T) {
+	actual := NewSqlVirtualMachineProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlVirtualMachineName", "databaseMigrationName").ID()
+	expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName"
+	if actual != expected {
+		t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual)
+	}
+}
+
+func TestParseSqlVirtualMachineProviders2DatabaseMigrationID(t *testing.T) {
+	testData := []struct {
+		Input    string
+		Error    bool
+		Expected *SqlVirtualMachineProviders2DatabaseMigrationId
+	}{
+		{
+			// Incomplete URI
+			Input: "",
+			Error: true,
+		},
+		{
+			// 
Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName", + Expected: &SqlVirtualMachineProviders2DatabaseMigrationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + SqlVirtualMachineName: "sqlVirtualMachineName", + DatabaseMigrationName: "databaseMigrationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSqlVirtualMachineProviders2DatabaseMigrationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.SqlVirtualMachineName != v.Expected.SqlVirtualMachineName { + t.Fatalf("Expected %q but got %q for SqlVirtualMachineName", v.Expected.SqlVirtualMachineName, actual.SqlVirtualMachineName) + } + + if actual.DatabaseMigrationName != v.Expected.DatabaseMigrationName { + t.Fatalf("Expected %q but got %q for DatabaseMigrationName", v.Expected.DatabaseMigrationName, actual.DatabaseMigrationName) + } + + } +} + +func 
TestParseSqlVirtualMachineProviders2DatabaseMigrationIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SqlVirtualMachineProviders2DatabaseMigrationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS/sQlViRtUaLmAcHiNeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS/sQlViRtUaLmAcHiNeNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS/sQlViRtUaLmAcHiNeNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS/sQlViRtUaLmAcHiNeNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName", + Expected: &SqlVirtualMachineProviders2DatabaseMigrationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + SqlVirtualMachineName: "sqlVirtualMachineName", + DatabaseMigrationName: "databaseMigrationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS/sQlViRtUaLmAcHiNeNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS/dAtAbAsEmIgRaTiOnNaMe", + Expected: &SqlVirtualMachineProviders2DatabaseMigrationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + SqlVirtualMachineName: "sQlViRtUaLmAcHiNeNaMe", + DatabaseMigrationName: "dAtAbAsEmIgRaTiOnNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS/sQlViRtUaLmAcHiNeNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS/dAtAbAsEmIgRaTiOnNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSqlVirtualMachineProviders2DatabaseMigrationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.SqlVirtualMachineName != v.Expected.SqlVirtualMachineName { + t.Fatalf("Expected %q but got %q for SqlVirtualMachineName", v.Expected.SqlVirtualMachineName, actual.SqlVirtualMachineName) + } + + if actual.DatabaseMigrationName != v.Expected.DatabaseMigrationName { + t.Fatalf("Expected %q but got %q for DatabaseMigrationName", v.Expected.DatabaseMigrationName, actual.DatabaseMigrationName) + 
}
+
+	}
+}
+
+func TestSegmentsForSqlVirtualMachineProviders2DatabaseMigrationId(t *testing.T) {
+	segments := SqlVirtualMachineProviders2DatabaseMigrationId{}.Segments()
+	if len(segments) == 0 {
+		t.Fatalf("SqlVirtualMachineProviders2DatabaseMigrationId has no segments")
+	}
+
+	uniqueNames := make(map[string]struct{}, 0)
+	for _, segment := range segments {
+		uniqueNames[segment.Name] = struct{}{}
+	}
+	if len(uniqueNames) != len(segments) {
+		t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments))
+	}
+}
diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbrumongocreate.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbrumongocreate.go
new file mode 100644
index 00000000000..c8ecce8de64
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbrumongocreate.go
@@ -0,0 +1,75 @@
+package databasemigrations
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/hashicorp/go-azure-sdk/sdk/client"
+	"github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
+	"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
+	"github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type MongoToCosmosDbRUMongoCreateOperationResponse struct {
+	Poller       pollers.Poller
+	HttpResponse *http.Response
+	OData        *odata.OData
+	Model        *DatabaseMigrationCosmosDbMongo
+}
+
+// MongoToCosmosDbRUMongoCreate ... 
+func (c DatabaseMigrationsClient) MongoToCosmosDbRUMongoCreate(ctx context.Context, id DatabaseAccountProviders2DatabaseMigrationId, input DatabaseMigrationCosmosDbMongo) (result MongoToCosmosDbRUMongoCreateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// MongoToCosmosDbRUMongoCreateThenPoll performs MongoToCosmosDbRUMongoCreate then polls until it's completed +func (c DatabaseMigrationsClient) MongoToCosmosDbRUMongoCreateThenPoll(ctx context.Context, id DatabaseAccountProviders2DatabaseMigrationId, input DatabaseMigrationCosmosDbMongo) error { + result, err := c.MongoToCosmosDbRUMongoCreate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing MongoToCosmosDbRUMongoCreate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after MongoToCosmosDbRUMongoCreate: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbrumongodelete.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbrumongodelete.go new file mode 100644 index 00000000000..afa227a89ac --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbrumongodelete.go @@ -0,0 +1,99 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + 
"github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoToCosmosDbRUMongoDeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +type MongoToCosmosDbRUMongoDeleteOperationOptions struct { + Force *bool +} + +func DefaultMongoToCosmosDbRUMongoDeleteOperationOptions() MongoToCosmosDbRUMongoDeleteOperationOptions { + return MongoToCosmosDbRUMongoDeleteOperationOptions{} +} + +func (o MongoToCosmosDbRUMongoDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o MongoToCosmosDbRUMongoDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o MongoToCosmosDbRUMongoDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Force != nil { + out.Append("force", fmt.Sprintf("%v", *o.Force)) + } + return &out +} + +// MongoToCosmosDbRUMongoDelete ... 
+func (c DatabaseMigrationsClient) MongoToCosmosDbRUMongoDelete(ctx context.Context, id DatabaseAccountProviders2DatabaseMigrationId, options MongoToCosmosDbRUMongoDeleteOperationOptions) (result MongoToCosmosDbRUMongoDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// MongoToCosmosDbRUMongoDeleteThenPoll performs MongoToCosmosDbRUMongoDelete then polls until it's completed +func (c DatabaseMigrationsClient) MongoToCosmosDbRUMongoDeleteThenPoll(ctx context.Context, id DatabaseAccountProviders2DatabaseMigrationId, options MongoToCosmosDbRUMongoDeleteOperationOptions) error { + result, err := c.MongoToCosmosDbRUMongoDelete(ctx, id, options) + if err != nil { + return fmt.Errorf("performing MongoToCosmosDbRUMongoDelete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after MongoToCosmosDbRUMongoDelete: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbrumongoget.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbrumongoget.go new file mode 100644 index 00000000000..0e3606884d3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbrumongoget.go @@ -0,0 +1,53 @@ +package databasemigrations + +import ( + "context" + "net/http" + + 
"github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoToCosmosDbRUMongoGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DatabaseMigrationCosmosDbMongo +} + +// MongoToCosmosDbRUMongoGet ... +func (c DatabaseMigrationsClient) MongoToCosmosDbRUMongoGet(ctx context.Context, id DatabaseAccountProviders2DatabaseMigrationId) (result MongoToCosmosDbRUMongoGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DatabaseMigrationCosmosDbMongo + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbrumongogetforscope.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbrumongogetforscope.go new file mode 100644 index 00000000000..d13d7493b55 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbrumongogetforscope.go @@ -0,0 +1,105 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoToCosmosDbRUMongoGetForScopeOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]DatabaseMigrationCosmosDbMongo +} + +type MongoToCosmosDbRUMongoGetForScopeCompleteResult struct { + LatestHttpResponse *http.Response + Items []DatabaseMigrationCosmosDbMongo +} + +type MongoToCosmosDbRUMongoGetForScopeCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *MongoToCosmosDbRUMongoGetForScopeCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// MongoToCosmosDbRUMongoGetForScope ... +func (c DatabaseMigrationsClient) MongoToCosmosDbRUMongoGetForScope(ctx context.Context, id DatabaseAccountId) (result MongoToCosmosDbRUMongoGetForScopeOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &MongoToCosmosDbRUMongoGetForScopeCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/databaseMigrations", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]DatabaseMigrationCosmosDbMongo `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// MongoToCosmosDbRUMongoGetForScopeComplete retrieves all the results into a single object +func (c DatabaseMigrationsClient) MongoToCosmosDbRUMongoGetForScopeComplete(ctx context.Context, id DatabaseAccountId) (MongoToCosmosDbRUMongoGetForScopeCompleteResult, error) { + return c.MongoToCosmosDbRUMongoGetForScopeCompleteMatchingPredicate(ctx, id, DatabaseMigrationCosmosDbMongoOperationPredicate{}) +} + +// 
MongoToCosmosDbRUMongoGetForScopeCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c DatabaseMigrationsClient) MongoToCosmosDbRUMongoGetForScopeCompleteMatchingPredicate(ctx context.Context, id DatabaseAccountId, predicate DatabaseMigrationCosmosDbMongoOperationPredicate) (result MongoToCosmosDbRUMongoGetForScopeCompleteResult, err error) { + items := make([]DatabaseMigrationCosmosDbMongo, 0) + + resp, err := c.MongoToCosmosDbRUMongoGetForScope(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = MongoToCosmosDbRUMongoGetForScopeCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbvcoremongocreate.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbvcoremongocreate.go new file mode 100644 index 00000000000..0e7926641ed --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbvcoremongocreate.go @@ -0,0 +1,75 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoToCosmosDbvCoreMongoCreateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DatabaseMigrationCosmosDbMongo +} + +// MongoToCosmosDbvCoreMongoCreate ... 
+func (c DatabaseMigrationsClient) MongoToCosmosDbvCoreMongoCreate(ctx context.Context, id MongoClusterProviders2DatabaseMigrationId, input DatabaseMigrationCosmosDbMongo) (result MongoToCosmosDbvCoreMongoCreateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// MongoToCosmosDbvCoreMongoCreateThenPoll performs MongoToCosmosDbvCoreMongoCreate then polls until it's completed +func (c DatabaseMigrationsClient) MongoToCosmosDbvCoreMongoCreateThenPoll(ctx context.Context, id MongoClusterProviders2DatabaseMigrationId, input DatabaseMigrationCosmosDbMongo) error { + result, err := c.MongoToCosmosDbvCoreMongoCreate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing MongoToCosmosDbvCoreMongoCreate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after MongoToCosmosDbvCoreMongoCreate: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbvcoremongodelete.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbvcoremongodelete.go new file mode 100644 index 00000000000..449efeffdab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbvcoremongodelete.go @@ -0,0 +1,99 @@ +package databasemigrations + +import ( + "context" + "fmt" + 
"net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoToCosmosDbvCoreMongoDeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +type MongoToCosmosDbvCoreMongoDeleteOperationOptions struct { + Force *bool +} + +func DefaultMongoToCosmosDbvCoreMongoDeleteOperationOptions() MongoToCosmosDbvCoreMongoDeleteOperationOptions { + return MongoToCosmosDbvCoreMongoDeleteOperationOptions{} +} + +func (o MongoToCosmosDbvCoreMongoDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o MongoToCosmosDbvCoreMongoDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o MongoToCosmosDbvCoreMongoDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Force != nil { + out.Append("force", fmt.Sprintf("%v", *o.Force)) + } + return &out +} + +// MongoToCosmosDbvCoreMongoDelete ... 
+func (c DatabaseMigrationsClient) MongoToCosmosDbvCoreMongoDelete(ctx context.Context, id MongoClusterProviders2DatabaseMigrationId, options MongoToCosmosDbvCoreMongoDeleteOperationOptions) (result MongoToCosmosDbvCoreMongoDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// MongoToCosmosDbvCoreMongoDeleteThenPoll performs MongoToCosmosDbvCoreMongoDelete then polls until it's completed +func (c DatabaseMigrationsClient) MongoToCosmosDbvCoreMongoDeleteThenPoll(ctx context.Context, id MongoClusterProviders2DatabaseMigrationId, options MongoToCosmosDbvCoreMongoDeleteOperationOptions) error { + result, err := c.MongoToCosmosDbvCoreMongoDelete(ctx, id, options) + if err != nil { + return fmt.Errorf("performing MongoToCosmosDbvCoreMongoDelete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after MongoToCosmosDbvCoreMongoDelete: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbvcoremongoget.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbvcoremongoget.go new file mode 100644 index 00000000000..89f98af2ab3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbvcoremongoget.go @@ -0,0 +1,53 @@ +package databasemigrations + +import ( + "context" + 
"net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoToCosmosDbvCoreMongoGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DatabaseMigrationCosmosDbMongo +} + +// MongoToCosmosDbvCoreMongoGet ... +func (c DatabaseMigrationsClient) MongoToCosmosDbvCoreMongoGet(ctx context.Context, id MongoClusterProviders2DatabaseMigrationId) (result MongoToCosmosDbvCoreMongoGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DatabaseMigrationCosmosDbMongo + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbvcoremongogetforscope.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbvcoremongogetforscope.go new file mode 100644 index 00000000000..d0f0e3781f1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_mongotocosmosdbvcoremongogetforscope.go @@ -0,0 +1,105 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MongoToCosmosDbvCoreMongoGetForScopeOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]DatabaseMigrationCosmosDbMongo +} + +type MongoToCosmosDbvCoreMongoGetForScopeCompleteResult struct { + LatestHttpResponse *http.Response + Items []DatabaseMigrationCosmosDbMongo +} + +type MongoToCosmosDbvCoreMongoGetForScopeCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *MongoToCosmosDbvCoreMongoGetForScopeCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// MongoToCosmosDbvCoreMongoGetForScope ... +func (c DatabaseMigrationsClient) MongoToCosmosDbvCoreMongoGetForScope(ctx context.Context, id MongoClusterId) (result MongoToCosmosDbvCoreMongoGetForScopeOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &MongoToCosmosDbvCoreMongoGetForScopeCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/databaseMigrations", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]DatabaseMigrationCosmosDbMongo `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// MongoToCosmosDbvCoreMongoGetForScopeComplete retrieves all the results into a single object +func (c DatabaseMigrationsClient) MongoToCosmosDbvCoreMongoGetForScopeComplete(ctx context.Context, id MongoClusterId) (MongoToCosmosDbvCoreMongoGetForScopeCompleteResult, error) { + return 
c.MongoToCosmosDbvCoreMongoGetForScopeCompleteMatchingPredicate(ctx, id, DatabaseMigrationCosmosDbMongoOperationPredicate{}) +} + +// MongoToCosmosDbvCoreMongoGetForScopeCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c DatabaseMigrationsClient) MongoToCosmosDbvCoreMongoGetForScopeCompleteMatchingPredicate(ctx context.Context, id MongoClusterId, predicate DatabaseMigrationCosmosDbMongoOperationPredicate) (result MongoToCosmosDbvCoreMongoGetForScopeCompleteResult, err error) { + items := make([]DatabaseMigrationCosmosDbMongo, 0) + + resp, err := c.MongoToCosmosDbvCoreMongoGetForScope(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = MongoToCosmosDbvCoreMongoGetForScopeCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbcancel.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbcancel.go new file mode 100644 index 00000000000..520447c0ab7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbcancel.go @@ -0,0 +1,74 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlDbcancelOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// SqlDbcancel ... 
+func (c DatabaseMigrationsClient) SqlDbcancel(ctx context.Context, id DatabaseMigrationId, input MigrationOperationInput) (result SqlDbcancelOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/cancel", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// SqlDbcancelThenPoll performs SqlDbcancel then polls until it's completed +func (c DatabaseMigrationsClient) SqlDbcancelThenPoll(ctx context.Context, id DatabaseMigrationId, input MigrationOperationInput) error { + result, err := c.SqlDbcancel(ctx, id, input) + if err != nil { + return fmt.Errorf("performing SqlDbcancel: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after SqlDbcancel: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbcreateorupdate.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbcreateorupdate.go new file mode 100644 index 00000000000..00bf625328d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbcreateorupdate.go @@ -0,0 +1,75 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlDbCreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DatabaseMigrationSqlDb +} + +// SqlDbCreateOrUpdate ... +func (c DatabaseMigrationsClient) SqlDbCreateOrUpdate(ctx context.Context, id DatabaseMigrationId, input DatabaseMigrationSqlDb) (result SqlDbCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// SqlDbCreateOrUpdateThenPoll performs SqlDbCreateOrUpdate then polls until it's completed +func (c DatabaseMigrationsClient) SqlDbCreateOrUpdateThenPoll(ctx context.Context, id DatabaseMigrationId, input DatabaseMigrationSqlDb) error { + result, err := c.SqlDbCreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing SqlDbCreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after SqlDbCreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbdelete.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbdelete.go new file mode 100644 index 
00000000000..fe724f51ff3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbdelete.go @@ -0,0 +1,100 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlDbDeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +type SqlDbDeleteOperationOptions struct { + Force *bool +} + +func DefaultSqlDbDeleteOperationOptions() SqlDbDeleteOperationOptions { + return SqlDbDeleteOperationOptions{} +} + +func (o SqlDbDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o SqlDbDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o SqlDbDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Force != nil { + out.Append("force", fmt.Sprintf("%v", *o.Force)) + } + return &out +} + +// SqlDbDelete ... 
+func (c DatabaseMigrationsClient) SqlDbDelete(ctx context.Context, id DatabaseMigrationId, options SqlDbDeleteOperationOptions) (result SqlDbDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// SqlDbDeleteThenPoll performs SqlDbDelete then polls until it's completed +func (c DatabaseMigrationsClient) SqlDbDeleteThenPoll(ctx context.Context, id DatabaseMigrationId, options SqlDbDeleteOperationOptions) error { + result, err := c.SqlDbDelete(ctx, id, options) + if err != nil { + return fmt.Errorf("performing SqlDbDelete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after SqlDbDelete: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbget.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbget.go new file mode 100644 index 00000000000..b966d32ab82 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbget.go @@ -0,0 +1,87 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlDbGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DatabaseMigrationSqlDb +} + +type SqlDbGetOperationOptions struct { + Expand *string + MigrationOperationId *string +} + +func DefaultSqlDbGetOperationOptions() SqlDbGetOperationOptions { + return SqlDbGetOperationOptions{} +} + +func (o SqlDbGetOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o SqlDbGetOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o SqlDbGetOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Expand != nil { + out.Append("$expand", fmt.Sprintf("%v", *o.Expand)) + } + if o.MigrationOperationId != nil { + out.Append("migrationOperationId", fmt.Sprintf("%v", *o.MigrationOperationId)) + } + return &out +} + +// SqlDbGet ... +func (c DatabaseMigrationsClient) SqlDbGet(ctx context.Context, id DatabaseMigrationId, options SqlDbGetOperationOptions) (result SqlDbGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DatabaseMigrationSqlDb + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbretry.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbretry.go new file mode 100644 index 00000000000..d4376c4ae20 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqldbretry.go @@ -0,0 +1,74 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlDbretryOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DatabaseMigrationSqlDb +} + +// SqlDbretry ... +func (c DatabaseMigrationsClient) SqlDbretry(ctx context.Context, id DatabaseMigrationId, input MigrationOperationInput) (result SqlDbretryOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/retry", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// SqlDbretryThenPoll performs SqlDbretry then polls until it's completed +func (c DatabaseMigrationsClient) SqlDbretryThenPoll(ctx context.Context, id DatabaseMigrationId, input MigrationOperationInput) error { + result, err := c.SqlDbretry(ctx, id, input) + if err != nil { + return fmt.Errorf("performing SqlDbretry: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after 
SqlDbretry: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmicancel.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmicancel.go new file mode 100644 index 00000000000..5bcea269b6c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmicancel.go @@ -0,0 +1,74 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlMicancelOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// SqlMicancel ... +func (c DatabaseMigrationsClient) SqlMicancel(ctx context.Context, id Providers2DatabaseMigrationId, input MigrationOperationInput) (result SqlMicancelOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/cancel", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// SqlMicancelThenPoll performs SqlMicancel then polls until it's completed +func (c DatabaseMigrationsClient) SqlMicancelThenPoll(ctx 
context.Context, id Providers2DatabaseMigrationId, input MigrationOperationInput) error { + result, err := c.SqlMicancel(ctx, id, input) + if err != nil { + return fmt.Errorf("performing SqlMicancel: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after SqlMicancel: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmicreateorupdate.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmicreateorupdate.go new file mode 100644 index 00000000000..7114ed316c4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmicreateorupdate.go @@ -0,0 +1,75 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlMiCreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DatabaseMigrationSqlMi +} + +// SqlMiCreateOrUpdate ... 
+func (c DatabaseMigrationsClient) SqlMiCreateOrUpdate(ctx context.Context, id Providers2DatabaseMigrationId, input DatabaseMigrationSqlMi) (result SqlMiCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// SqlMiCreateOrUpdateThenPoll performs SqlMiCreateOrUpdate then polls until it's completed +func (c DatabaseMigrationsClient) SqlMiCreateOrUpdateThenPoll(ctx context.Context, id Providers2DatabaseMigrationId, input DatabaseMigrationSqlMi) error { + result, err := c.SqlMiCreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing SqlMiCreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after SqlMiCreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmicutover.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmicutover.go new file mode 100644 index 00000000000..a19435b9a9f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmicutover.go @@ -0,0 +1,74 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlMicutoverOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// SqlMicutover ... +func (c DatabaseMigrationsClient) SqlMicutover(ctx context.Context, id Providers2DatabaseMigrationId, input MigrationOperationInput) (result SqlMicutoverOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/cutover", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// SqlMicutoverThenPoll performs SqlMicutover then polls until it's completed +func (c DatabaseMigrationsClient) SqlMicutoverThenPoll(ctx context.Context, id Providers2DatabaseMigrationId, input MigrationOperationInput) error { + result, err := c.SqlMicutover(ctx, id, input) + if err != nil { + return fmt.Errorf("performing SqlMicutover: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after SqlMicutover: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmidelete.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmidelete.go new file mode 100644 index 00000000000..11d79da629e --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmidelete.go @@ -0,0 +1,101 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlMiDeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DatabaseMigrationSqlMi +} + +type SqlMiDeleteOperationOptions struct { + Force *bool +} + +func DefaultSqlMiDeleteOperationOptions() SqlMiDeleteOperationOptions { + return SqlMiDeleteOperationOptions{} +} + +func (o SqlMiDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o SqlMiDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o SqlMiDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Force != nil { + out.Append("force", fmt.Sprintf("%v", *o.Force)) + } + return &out +} + +// SqlMiDelete ... 
+func (c DatabaseMigrationsClient) SqlMiDelete(ctx context.Context, id Providers2DatabaseMigrationId, options SqlMiDeleteOperationOptions) (result SqlMiDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// SqlMiDeleteThenPoll performs SqlMiDelete then polls until it's completed +func (c DatabaseMigrationsClient) SqlMiDeleteThenPoll(ctx context.Context, id Providers2DatabaseMigrationId, options SqlMiDeleteOperationOptions) error { + result, err := c.SqlMiDelete(ctx, id, options) + if err != nil { + return fmt.Errorf("performing SqlMiDelete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after SqlMiDelete: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmiget.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmiget.go new file mode 100644 index 00000000000..07c2dad22db --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlmiget.go @@ -0,0 +1,87 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type SqlMiGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DatabaseMigrationSqlMi +} + +type SqlMiGetOperationOptions struct { + Expand *string + MigrationOperationId *string +} + +func DefaultSqlMiGetOperationOptions() SqlMiGetOperationOptions { + return SqlMiGetOperationOptions{} +} + +func (o SqlMiGetOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o SqlMiGetOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o SqlMiGetOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Expand != nil { + out.Append("$expand", fmt.Sprintf("%v", *o.Expand)) + } + if o.MigrationOperationId != nil { + out.Append("migrationOperationId", fmt.Sprintf("%v", *o.MigrationOperationId)) + } + return &out +} + +// SqlMiGet ... +func (c DatabaseMigrationsClient) SqlMiGet(ctx context.Context, id Providers2DatabaseMigrationId, options SqlMiGetOperationOptions) (result SqlMiGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DatabaseMigrationSqlMi + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlvmcancel.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlvmcancel.go new file mode 100644 index 00000000000..1194a7b55a5 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlvmcancel.go @@ -0,0 +1,74 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlVMcancelOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// SqlVMcancel ... +func (c DatabaseMigrationsClient) SqlVMcancel(ctx context.Context, id SqlVirtualMachineProviders2DatabaseMigrationId, input MigrationOperationInput) (result SqlVMcancelOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/cancel", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// SqlVMcancelThenPoll performs SqlVMcancel then polls until it's completed +func (c DatabaseMigrationsClient) SqlVMcancelThenPoll(ctx context.Context, id SqlVirtualMachineProviders2DatabaseMigrationId, input MigrationOperationInput) error { + result, err := c.SqlVMcancel(ctx, id, input) + if err != nil { + return fmt.Errorf("performing SqlVMcancel: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); 
err != nil { + return fmt.Errorf("polling after SqlVMcancel: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlvmcreateorupdate.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlvmcreateorupdate.go new file mode 100644 index 00000000000..4af8bffcf63 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlvmcreateorupdate.go @@ -0,0 +1,75 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlVMCreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DatabaseMigrationSqlVM +} + +// SqlVMCreateOrUpdate ... 
+func (c DatabaseMigrationsClient) SqlVMCreateOrUpdate(ctx context.Context, id SqlVirtualMachineProviders2DatabaseMigrationId, input DatabaseMigrationSqlVM) (result SqlVMCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// SqlVMCreateOrUpdateThenPoll performs SqlVMCreateOrUpdate then polls until it's completed +func (c DatabaseMigrationsClient) SqlVMCreateOrUpdateThenPoll(ctx context.Context, id SqlVirtualMachineProviders2DatabaseMigrationId, input DatabaseMigrationSqlVM) error { + result, err := c.SqlVMCreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing SqlVMCreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after SqlVMCreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlvmcutover.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlvmcutover.go new file mode 100644 index 00000000000..933a0065253 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlvmcutover.go @@ -0,0 +1,74 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlVMcutoverOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// SqlVMcutover ... +func (c DatabaseMigrationsClient) SqlVMcutover(ctx context.Context, id SqlVirtualMachineProviders2DatabaseMigrationId, input MigrationOperationInput) (result SqlVMcutoverOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/cutover", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// SqlVMcutoverThenPoll performs SqlVMcutover then polls until it's completed +func (c DatabaseMigrationsClient) SqlVMcutoverThenPoll(ctx context.Context, id SqlVirtualMachineProviders2DatabaseMigrationId, input MigrationOperationInput) error { + result, err := c.SqlVMcutover(ctx, id, input) + if err != nil { + return fmt.Errorf("performing SqlVMcutover: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after SqlVMcutover: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlvmget.go b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlvmget.go new 
file mode 100644 index 00000000000..91f9a682105 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/method_sqlvmget.go @@ -0,0 +1,87 @@ +package databasemigrations + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlVMGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DatabaseMigrationSqlVM +} + +type SqlVMGetOperationOptions struct { + Expand *string + MigrationOperationId *string +} + +func DefaultSqlVMGetOperationOptions() SqlVMGetOperationOptions { + return SqlVMGetOperationOptions{} +} + +func (o SqlVMGetOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o SqlVMGetOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o SqlVMGetOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Expand != nil { + out.Append("$expand", fmt.Sprintf("%v", *o.Expand)) + } + if o.MigrationOperationId != nil { + out.Append("migrationOperationId", fmt.Sprintf("%v", *o.MigrationOperationId)) + } + return &out +} + +// SqlVMGet ... 
+func (c DatabaseMigrationsClient) SqlVMGet(ctx context.Context, id SqlVirtualMachineProviders2DatabaseMigrationId, options SqlVMGetOperationOptions) (result SqlVMGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DatabaseMigrationSqlVM + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_azureblob.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_azureblob.go new file mode 100644 index 00000000000..31cc4135243 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_azureblob.go @@ -0,0 +1,16 @@ +package databasemigrations + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AzureBlob struct { + AccountKey *string `json:"accountKey,omitempty"` + AuthType *AuthType `json:"authType,omitempty"` + BlobContainerName *string `json:"blobContainerName,omitempty"` + Identity *identity.LegacySystemAndUserAssignedMap `json:"identity,omitempty"` + StorageAccountResourceId *string `json:"storageAccountResourceId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_backupconfiguration.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_backupconfiguration.go new file mode 100644 index 00000000000..3f0d36e55b3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_backupconfiguration.go @@ -0,0 +1,9 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupConfiguration struct { + SourceLocation *SourceLocation `json:"sourceLocation,omitempty"` + TargetLocation *TargetLocation `json:"targetLocation,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_copyprogressdetails.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_copyprogressdetails.go new file mode 100644 index 00000000000..b3230fbc90e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_copyprogressdetails.go @@ -0,0 +1,36 @@ +package databasemigrations + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CopyProgressDetails struct { + CopyDuration *int64 `json:"copyDuration,omitempty"` + CopyStart *string `json:"copyStart,omitempty"` + CopyThroughput *float64 `json:"copyThroughput,omitempty"` + DataRead *int64 `json:"dataRead,omitempty"` + DataWritten *int64 `json:"dataWritten,omitempty"` + ParallelCopyType *string `json:"parallelCopyType,omitempty"` + RowsCopied *int64 `json:"rowsCopied,omitempty"` + RowsRead *int64 `json:"rowsRead,omitempty"` + Status *string `json:"status,omitempty"` + TableName *string `json:"tableName,omitempty"` + UsedParallelCopies *int64 `json:"usedParallelCopies,omitempty"` +} + +func (o *CopyProgressDetails) GetCopyStartAsTime() (*time.Time, error) { + if o.CopyStart == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CopyStart, "2006-01-02T15:04:05Z07:00") +} + +func (o *CopyProgressDetails) SetCopyStartAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CopyStart = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationbaseproperties.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationbaseproperties.go new file mode 100644 index 00000000000..52552319923 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationbaseproperties.go @@ -0,0 +1,92 @@ +package databasemigrations + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseMigrationBaseProperties interface { + DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl +} + +var _ DatabaseMigrationBaseProperties = BaseDatabaseMigrationBasePropertiesImpl{} + +type BaseDatabaseMigrationBasePropertiesImpl struct { + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s BaseDatabaseMigrationBasePropertiesImpl) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return s +} + +var _ DatabaseMigrationBaseProperties = RawDatabaseMigrationBasePropertiesImpl{} + +// RawDatabaseMigrationBasePropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawDatabaseMigrationBasePropertiesImpl struct { + databaseMigrationBaseProperties BaseDatabaseMigrationBasePropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawDatabaseMigrationBasePropertiesImpl) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return s.databaseMigrationBaseProperties +} + +func UnmarshalDatabaseMigrationBasePropertiesImplementation(input []byte) (DatabaseMigrationBaseProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationBaseProperties into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["kind"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseMigrationProperties") { + var out DatabaseMigrationProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into DatabaseMigrationProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MongoToCosmosDbMongo") { + var out DatabaseMigrationPropertiesCosmosDbMongo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into DatabaseMigrationPropertiesCosmosDbMongo: %+v", err) + } + return out, nil + } + + var parent BaseDatabaseMigrationBasePropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseDatabaseMigrationBasePropertiesImpl: %+v", err) + } + + return RawDatabaseMigrationBasePropertiesImpl{ + databaseMigrationBaseProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationcosmosdbmongo.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationcosmosdbmongo.go new file mode 100644 index 00000000000..17e1780e66f --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationcosmosdbmongo.go @@ -0,0 +1,16 @@ +package databasemigrations + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseMigrationCosmosDbMongo struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DatabaseMigrationPropertiesCosmosDbMongo `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationproperties.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationproperties.go new file mode 100644 index 00000000000..74f788e8cef --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationproperties.go @@ -0,0 +1,98 @@ +package databasemigrations + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ DatabaseMigrationBaseProperties = DatabaseMigrationProperties{} + +type DatabaseMigrationProperties struct { + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + SourceServerName *string `json:"sourceServerName,omitempty"` + SourceSqlConnection *SqlConnectionInformation `json:"sourceSqlConnection,omitempty"` + TargetDatabaseCollation *string `json:"targetDatabaseCollation,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationProperties) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +func (o *DatabaseMigrationProperties) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseMigrationProperties) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DatabaseMigrationProperties) GetStartedOnAsTime() 
(*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseMigrationProperties) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} + +var _ json.Marshaler = DatabaseMigrationProperties{} + +func (s DatabaseMigrationProperties) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationProperties: %+v", err) + } + + decoded["kind"] = "DatabaseMigrationProperties" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationpropertiescosmosdbmongo.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationpropertiescosmosdbmongo.go new file mode 100644 index 00000000000..a998fdff3ad --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationpropertiescosmosdbmongo.go @@ -0,0 +1,97 @@ +package databasemigrations + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesCosmosDbMongo{} + +type DatabaseMigrationPropertiesCosmosDbMongo struct { + CollectionList *[]MongoMigrationCollection `json:"collectionList,omitempty"` + SourceMongoConnection *MongoConnectionInformation `json:"sourceMongoConnection,omitempty"` + TargetMongoConnection *MongoConnectionInformation `json:"targetMongoConnection,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationPropertiesCosmosDbMongo) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +func (o *DatabaseMigrationPropertiesCosmosDbMongo) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseMigrationPropertiesCosmosDbMongo) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o 
*DatabaseMigrationPropertiesCosmosDbMongo) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseMigrationPropertiesCosmosDbMongo) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} + +var _ json.Marshaler = DatabaseMigrationPropertiesCosmosDbMongo{} + +func (s DatabaseMigrationPropertiesCosmosDbMongo) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesCosmosDbMongo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationPropertiesCosmosDbMongo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesCosmosDbMongo: %+v", err) + } + + decoded["kind"] = "MongoToCosmosDbMongo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesCosmosDbMongo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationpropertiessqldb.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationpropertiessqldb.go new file mode 100644 index 00000000000..a5d8806a0ae --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationpropertiessqldb.go @@ -0,0 +1,71 @@ +package databasemigrations + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesSqlDb{} + +type DatabaseMigrationPropertiesSqlDb struct { + MigrationStatusDetails *SqlDbMigrationStatusDetails `json:"migrationStatusDetails,omitempty"` + OfflineConfiguration *SqlDbOfflineConfiguration `json:"offlineConfiguration,omitempty"` + TableList *[]string `json:"tableList,omitempty"` + TargetSqlConnection *SqlConnectionInformation `json:"targetSqlConnection,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationPropertiesSqlDb) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +var _ json.Marshaler = DatabaseMigrationPropertiesSqlDb{} + +func (s DatabaseMigrationPropertiesSqlDb) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesSqlDb + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationPropertiesSqlDb: %+v", err) + } + + var decoded 
map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesSqlDb: %+v", err) + } + + decoded["kind"] = "SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesSqlDb: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationpropertiessqlmi.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationpropertiessqlmi.go new file mode 100644 index 00000000000..6f0e124c67d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationpropertiessqlmi.go @@ -0,0 +1,70 @@ +package databasemigrations + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesSqlMi{} + +type DatabaseMigrationPropertiesSqlMi struct { + BackupConfiguration *BackupConfiguration `json:"backupConfiguration,omitempty"` + MigrationStatusDetails *MigrationStatusDetails `json:"migrationStatusDetails,omitempty"` + OfflineConfiguration *OfflineConfiguration `json:"offlineConfiguration,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationPropertiesSqlMi) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +var _ json.Marshaler = DatabaseMigrationPropertiesSqlMi{} + +func (s DatabaseMigrationPropertiesSqlMi) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesSqlMi + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationPropertiesSqlMi: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != 
nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesSqlMi: %+v", err) + } + + decoded["kind"] = "SqlMi" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesSqlMi: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationpropertiessqlvm.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationpropertiessqlvm.go new file mode 100644 index 00000000000..88861a7e8ce --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationpropertiessqlvm.go @@ -0,0 +1,70 @@ +package databasemigrations + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesSqlVM{} + +type DatabaseMigrationPropertiesSqlVM struct { + BackupConfiguration *BackupConfiguration `json:"backupConfiguration,omitempty"` + MigrationStatusDetails *MigrationStatusDetails `json:"migrationStatusDetails,omitempty"` + OfflineConfiguration *OfflineConfiguration `json:"offlineConfiguration,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s 
DatabaseMigrationPropertiesSqlVM) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +var _ json.Marshaler = DatabaseMigrationPropertiesSqlVM{} + +func (s DatabaseMigrationPropertiesSqlVM) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesSqlVM + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationPropertiesSqlVM: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesSqlVM: %+v", err) + } + + decoded["kind"] = "SqlVm" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesSqlVM: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationsqldb.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationsqldb.go new file mode 100644 index 00000000000..58e9ef456c6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationsqldb.go @@ -0,0 +1,16 @@ +package databasemigrations + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseMigrationSqlDb struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DatabaseMigrationPropertiesSqlDb `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationsqlmi.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationsqlmi.go new file mode 100644 index 00000000000..f2c9ce1995b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationsqlmi.go @@ -0,0 +1,16 @@ +package databasemigrations + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseMigrationSqlMi struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DatabaseMigrationPropertiesSqlMi `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationsqlvm.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationsqlvm.go new file mode 100644 index 00000000000..ecd289d7843 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_databasemigrationsqlvm.go @@ -0,0 +1,16 @@ +package databasemigrations + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseMigrationSqlVM struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DatabaseMigrationPropertiesSqlVM `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_errorinfo.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_errorinfo.go new file mode 100644 index 00000000000..25b7537aa8c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_errorinfo.go @@ -0,0 +1,9 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ErrorInfo struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_migrationoperationinput.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_migrationoperationinput.go new file mode 100644 index 00000000000..c8670b99f63 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_migrationoperationinput.go @@ -0,0 +1,8 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationOperationInput struct { + MigrationOperationId *string `json:"migrationOperationId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_migrationstatusdetails.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_migrationstatusdetails.go new file mode 100644 index 00000000000..f136e2555ed --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_migrationstatusdetails.go @@ -0,0 +1,20 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationStatusDetails struct { + ActiveBackupSets *[]SqlBackupSetInfo `json:"activeBackupSets,omitempty"` + BlobContainerName *string `json:"blobContainerName,omitempty"` + CompleteRestoreErrorMessage *string `json:"completeRestoreErrorMessage,omitempty"` + CurrentRestoringFilename *string `json:"currentRestoringFilename,omitempty"` + FileUploadBlockingErrors *[]string `json:"fileUploadBlockingErrors,omitempty"` + FullBackupSetInfo *SqlBackupSetInfo `json:"fullBackupSetInfo,omitempty"` + InvalidFiles *[]string `json:"invalidFiles,omitempty"` + IsFullBackupRestored *bool `json:"isFullBackupRestored,omitempty"` + LastRestoredBackupSetInfo *SqlBackupSetInfo `json:"lastRestoredBackupSetInfo,omitempty"` + LastRestoredFilename *string `json:"lastRestoredFilename,omitempty"` + MigrationState *string `json:"migrationState,omitempty"` + PendingLogBackupsCount *int64 `json:"pendingLogBackupsCount,omitempty"` + RestoreBlockingReason *string `json:"restoreBlockingReason,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_mongoconnectioninformation.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_mongoconnectioninformation.go new file mode 100644 index 00000000000..24998af973b --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/databasemigrations/model_mongoconnectioninformation.go @@ -0,0 +1,13 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoConnectionInformation struct { + ConnectionString *string `json:"connectionString,omitempty"` + Host *string `json:"host,omitempty"` + Password *string `json:"password,omitempty"` + Port *int64 `json:"port,omitempty"` + UseSsl *bool `json:"useSsl,omitempty"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_mongomigrationcollection.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_mongomigrationcollection.go new file mode 100644 index 00000000000..afea679ee1a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_mongomigrationcollection.go @@ -0,0 +1,12 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoMigrationCollection struct { + MigrationProgressDetails *MongoMigrationProgressDetails `json:"migrationProgressDetails,omitempty"` + SourceCollection *string `json:"sourceCollection,omitempty"` + SourceDatabase *string `json:"sourceDatabase,omitempty"` + TargetCollection *string `json:"targetCollection,omitempty"` + TargetDatabase *string `json:"targetDatabase,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_mongomigrationprogressdetails.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_mongomigrationprogressdetails.go new file mode 100644 index 00000000000..85b7996fc16 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_mongomigrationprogressdetails.go @@ -0,0 +1,12 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoMigrationProgressDetails struct { + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + MigrationError *string `json:"migrationError,omitempty"` + MigrationStatus *MongoMigrationStatus `json:"migrationStatus,omitempty"` + ProcessedDocumentCount *int64 `json:"processedDocumentCount,omitempty"` + SourceDocumentCount *int64 `json:"sourceDocumentCount,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_offlineconfiguration.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_offlineconfiguration.go new file mode 100644 index 00000000000..6126c39ef3b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_offlineconfiguration.go @@ -0,0 +1,9 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type OfflineConfiguration struct { + LastBackupName *string `json:"lastBackupName,omitempty"` + Offline *bool `json:"offline,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_sourcelocation.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sourcelocation.go new file mode 100644 index 00000000000..ca644771951 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sourcelocation.go @@ -0,0 +1,10 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SourceLocation struct { + AzureBlob *AzureBlob `json:"azureBlob,omitempty"` + FileShare *SqlFileShare `json:"fileShare,omitempty"` + FileStorageType *string `json:"fileStorageType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqlbackupfileinfo.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqlbackupfileinfo.go new file mode 100644 index 00000000000..e3090c9833a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqlbackupfileinfo.go @@ -0,0 +1,15 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlBackupFileInfo struct { + CopyDuration *int64 `json:"copyDuration,omitempty"` + CopyThroughput *float64 `json:"copyThroughput,omitempty"` + DataRead *int64 `json:"dataRead,omitempty"` + DataWritten *int64 `json:"dataWritten,omitempty"` + FamilySequenceNumber *int64 `json:"familySequenceNumber,omitempty"` + FileName *string `json:"fileName,omitempty"` + Status *string `json:"status,omitempty"` + TotalSize *int64 `json:"totalSize,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqlbackupsetinfo.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqlbackupsetinfo.go new file mode 100644 index 00000000000..4077fefe715 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqlbackupsetinfo.go @@ -0,0 +1,48 @@ +package databasemigrations + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlBackupSetInfo struct { + BackupFinishDate *string `json:"backupFinishDate,omitempty"` + BackupSetId *string `json:"backupSetId,omitempty"` + BackupStartDate *string `json:"backupStartDate,omitempty"` + BackupType *string `json:"backupType,omitempty"` + FamilyCount *int64 `json:"familyCount,omitempty"` + FirstLSN *string `json:"firstLSN,omitempty"` + HasBackupChecksums *bool `json:"hasBackupChecksums,omitempty"` + IgnoreReasons *[]string `json:"ignoreReasons,omitempty"` + IsBackupRestored *bool `json:"isBackupRestored,omitempty"` + LastLSN *string `json:"lastLSN,omitempty"` + ListOfBackupFiles *[]SqlBackupFileInfo `json:"listOfBackupFiles,omitempty"` +} + +func (o *SqlBackupSetInfo) GetBackupFinishDateAsTime() (*time.Time, error) { + if o.BackupFinishDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *SqlBackupSetInfo) SetBackupFinishDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishDate = &formatted +} + +func (o *SqlBackupSetInfo) GetBackupStartDateAsTime() (*time.Time, error) { + if o.BackupStartDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupStartDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *SqlBackupSetInfo) SetBackupStartDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupStartDate = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqlconnectioninformation.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqlconnectioninformation.go new file mode 100644 index 00000000000..7c4aad3038b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqlconnectioninformation.go @@ -0,0 +1,13 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type SqlConnectionInformation struct { + Authentication *string `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Password *string `json:"password,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqldbmigrationstatusdetails.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqldbmigrationstatusdetails.go new file mode 100644 index 00000000000..d431a196c48 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqldbmigrationstatusdetails.go @@ -0,0 +1,10 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlDbMigrationStatusDetails struct { + ListOfCopyProgressDetails *[]CopyProgressDetails `json:"listOfCopyProgressDetails,omitempty"` + MigrationState *string `json:"migrationState,omitempty"` + SqlDataCopyErrors *[]string `json:"sqlDataCopyErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqldbofflineconfiguration.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqldbofflineconfiguration.go new file mode 100644 index 00000000000..cbf2f40fd20 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqldbofflineconfiguration.go @@ -0,0 +1,8 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlDbOfflineConfiguration struct { + Offline *bool `json:"offline,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqlfileshare.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqlfileshare.go new file mode 100644 index 00000000000..cbb23bc0ca0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_sqlfileshare.go @@ -0,0 +1,10 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlFileShare struct { + Password *string `json:"password,omitempty"` + Path *string `json:"path,omitempty"` + Username *string `json:"username,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/model_targetlocation.go b/resource-manager/datamigration/2025-06-30/databasemigrations/model_targetlocation.go new file mode 100644 index 00000000000..4f141f8086c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/model_targetlocation.go @@ -0,0 +1,9 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TargetLocation struct { + AccountKey *string `json:"accountKey,omitempty"` + StorageAccountResourceId *string `json:"storageAccountResourceId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/predicates.go b/resource-manager/datamigration/2025-06-30/databasemigrations/predicates.go new file mode 100644 index 00000000000..f695734190a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/predicates.go @@ -0,0 +1,27 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type DatabaseMigrationCosmosDbMongoOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p DatabaseMigrationCosmosDbMongoOperationPredicate) Matches(input DatabaseMigrationCosmosDbMongo) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrations/version.go b/resource-manager/datamigration/2025-06-30/databasemigrations/version.go new file mode 100644 index 00000000000..67c1eb931e8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrations/version.go @@ -0,0 +1,10 @@ +package databasemigrations + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/databasemigrations/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/README.md b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/README.md new file mode 100644 index 00000000000..3454dbb8c5e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/README.md @@ -0,0 +1,32 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm` Documentation + +The `databasemigrationssqlvm` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm" +``` + + +### Client Initialization + +```go +client := databasemigrationssqlvm.NewDatabaseMigrationsSqlVMClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `DatabaseMigrationsSqlVMClient.Delete` + +```go +ctx := context.TODO() +id := databasemigrationssqlvm.NewSqlVirtualMachineProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlVirtualMachineName", "databaseMigrationName") + +if err := client.DeleteThenPoll(ctx, id, databasemigrationssqlvm.DefaultDeleteOperationOptions()); err != nil { + // handle the error +} +``` diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/client.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/client.go new file mode 100644 index 00000000000..497d64402f0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/client.go @@ -0,0 +1,26 @@ +package databasemigrationssqlvm + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseMigrationsSqlVMClient struct { + Client *resourcemanager.Client +} + +func NewDatabaseMigrationsSqlVMClientWithBaseURI(sdkApi sdkEnv.Api) (*DatabaseMigrationsSqlVMClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "databasemigrationssqlvm", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating DatabaseMigrationsSqlVMClient: %+v", err) + } + + return &DatabaseMigrationsSqlVMClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/constants.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/constants.go new file mode 100644 index 00000000000..94a13b899cd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/constants.go @@ -0,0 +1,198 @@ +package databasemigrationssqlvm + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AuthType string + +const ( + AuthTypeAccountKey AuthType = "AccountKey" + AuthTypeManagedIdentity AuthType = "ManagedIdentity" +) + +func PossibleValuesForAuthType() []string { + return []string{ + string(AuthTypeAccountKey), + string(AuthTypeManagedIdentity), + } +} + +func (s *AuthType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthType(input string) (*AuthType, error) { + vals := map[string]AuthType{ + "accountkey": AuthTypeAccountKey, + "managedidentity": AuthTypeManagedIdentity, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthType(input) + return &out, nil +} + +type MongoMigrationStatus string + +const ( + MongoMigrationStatusCanceled MongoMigrationStatus = "Canceled" + MongoMigrationStatusCompleted MongoMigrationStatus = "Completed" + MongoMigrationStatusFailed MongoMigrationStatus = "Failed" + MongoMigrationStatusInProgress MongoMigrationStatus = "InProgress" + MongoMigrationStatusNotStarted MongoMigrationStatus = "NotStarted" +) + +func PossibleValuesForMongoMigrationStatus() []string { + return []string{ + string(MongoMigrationStatusCanceled), + string(MongoMigrationStatusCompleted), + string(MongoMigrationStatusFailed), + string(MongoMigrationStatusInProgress), + string(MongoMigrationStatusNotStarted), + } +} + +func (s *MongoMigrationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoMigrationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func 
parseMongoMigrationStatus(input string) (*MongoMigrationStatus, error) { + vals := map[string]MongoMigrationStatus{ + "canceled": MongoMigrationStatusCanceled, + "completed": MongoMigrationStatusCompleted, + "failed": MongoMigrationStatusFailed, + "inprogress": MongoMigrationStatusInProgress, + "notstarted": MongoMigrationStatusNotStarted, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoMigrationStatus(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateProvisioning ProvisioningState = "Provisioning" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateCanceled), + string(ProvisioningStateFailed), + string(ProvisioningStateProvisioning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUpdating), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "canceled": ProvisioningStateCanceled, + "failed": ProvisioningStateFailed, + "provisioning": ProvisioningStateProvisioning, + "succeeded": ProvisioningStateSucceeded, + "updating": ProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := 
ProvisioningState(input) + return &out, nil +} + +type ResourceType string + +const ( + ResourceTypeMongoToCosmosDbMongo ResourceType = "MongoToCosmosDbMongo" + ResourceTypeSqlDb ResourceType = "SqlDb" + ResourceTypeSqlMi ResourceType = "SqlMi" + ResourceTypeSqlVM ResourceType = "SqlVm" +) + +func PossibleValuesForResourceType() []string { + return []string{ + string(ResourceTypeMongoToCosmosDbMongo), + string(ResourceTypeSqlDb), + string(ResourceTypeSqlMi), + string(ResourceTypeSqlVM), + } +} + +func (s *ResourceType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResourceType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResourceType(input string) (*ResourceType, error) { + vals := map[string]ResourceType{ + "mongotocosmosdbmongo": ResourceTypeMongoToCosmosDbMongo, + "sqldb": ResourceTypeSqlDb, + "sqlmi": ResourceTypeSqlMi, + "sqlvm": ResourceTypeSqlVM, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResourceType(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/id_sqlvirtualmachineproviders2databasemigration.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/id_sqlvirtualmachineproviders2databasemigration.go new file mode 100644 index 00000000000..aa5d3059e1f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/id_sqlvirtualmachineproviders2databasemigration.go @@ -0,0 +1,141 @@ +package databasemigrationssqlvm + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&SqlVirtualMachineProviders2DatabaseMigrationId{}) +} + +var _ resourceids.ResourceId = &SqlVirtualMachineProviders2DatabaseMigrationId{} + +// SqlVirtualMachineProviders2DatabaseMigrationId is a struct representing the Resource ID for a Sql Virtual Machine Providers 2 Database Migration +type SqlVirtualMachineProviders2DatabaseMigrationId struct { + SubscriptionId string + ResourceGroupName string + SqlVirtualMachineName string + DatabaseMigrationName string +} + +// NewSqlVirtualMachineProviders2DatabaseMigrationID returns a new SqlVirtualMachineProviders2DatabaseMigrationId struct +func NewSqlVirtualMachineProviders2DatabaseMigrationID(subscriptionId string, resourceGroupName string, sqlVirtualMachineName string, databaseMigrationName string) SqlVirtualMachineProviders2DatabaseMigrationId { + return SqlVirtualMachineProviders2DatabaseMigrationId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + SqlVirtualMachineName: sqlVirtualMachineName, + DatabaseMigrationName: databaseMigrationName, + } +} + +// ParseSqlVirtualMachineProviders2DatabaseMigrationID parses 'input' into a SqlVirtualMachineProviders2DatabaseMigrationId +func ParseSqlVirtualMachineProviders2DatabaseMigrationID(input string) (*SqlVirtualMachineProviders2DatabaseMigrationId, error) { + parser := resourceids.NewParserFromResourceIdType(&SqlVirtualMachineProviders2DatabaseMigrationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SqlVirtualMachineProviders2DatabaseMigrationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseSqlVirtualMachineProviders2DatabaseMigrationIDInsensitively parses 'input' case-insensitively into a 
SqlVirtualMachineProviders2DatabaseMigrationId +// note: this method should only be used for API response data and not user input +func ParseSqlVirtualMachineProviders2DatabaseMigrationIDInsensitively(input string) (*SqlVirtualMachineProviders2DatabaseMigrationId, error) { + parser := resourceids.NewParserFromResourceIdType(&SqlVirtualMachineProviders2DatabaseMigrationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SqlVirtualMachineProviders2DatabaseMigrationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *SqlVirtualMachineProviders2DatabaseMigrationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.SqlVirtualMachineName, ok = input.Parsed["sqlVirtualMachineName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "sqlVirtualMachineName", input) + } + + if id.DatabaseMigrationName, ok = input.Parsed["databaseMigrationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "databaseMigrationName", input) + } + + return nil +} + +// ValidateSqlVirtualMachineProviders2DatabaseMigrationID checks that 'input' can be parsed as a Sql Virtual Machine Providers 2 Database Migration ID +func ValidateSqlVirtualMachineProviders2DatabaseMigrationID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseSqlVirtualMachineProviders2DatabaseMigrationID(v); err != nil { + errors = append(errors, err) + } + + return +} 

// ID returns the formatted Sql Virtual Machine Providers 2 Database Migration ID
func (id SqlVirtualMachineProviders2DatabaseMigrationId) ID() string {
	fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/%s/providers/Microsoft.DataMigration/databaseMigrations/%s"
	return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.SqlVirtualMachineName, id.DatabaseMigrationName)
}

// Segments returns a slice of Resource ID Segments which comprise this Sql Virtual Machine Providers 2 Database Migration ID
// (the third argument of each segment is an example value, used for documentation and validation)
func (id SqlVirtualMachineProviders2DatabaseMigrationId) Segments() []resourceids.Segment {
	return []resourceids.Segment{
		resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"),
		resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"),
		resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"),
		resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"),
		resourceids.StaticSegment("staticProviders", "providers", "providers"),
		resourceids.ResourceProviderSegment("staticMicrosoftSqlVirtualMachine", "Microsoft.SqlVirtualMachine", "Microsoft.SqlVirtualMachine"),
		resourceids.StaticSegment("staticSqlVirtualMachines", "sqlVirtualMachines", "sqlVirtualMachines"),
		resourceids.UserSpecifiedSegment("sqlVirtualMachineName", "sqlVirtualMachineName"),
		resourceids.StaticSegment("staticProviders2", "providers", "providers"),
		resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"),
		resourceids.StaticSegment("staticDatabaseMigrations", "databaseMigrations", "databaseMigrations"),
		resourceids.UserSpecifiedSegment("databaseMigrationName", "databaseMigrationName"),
	}
}

// String returns a human-readable description of this Sql Virtual Machine Providers 2 Database Migration ID
func (id SqlVirtualMachineProviders2DatabaseMigrationId) String() string {
	components := []string{
		fmt.Sprintf("Subscription: %q", id.SubscriptionId),
		fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName),
		fmt.Sprintf("Sql Virtual Machine Name: %q", id.SqlVirtualMachineName),
		fmt.Sprintf("Database Migration Name: %q", id.DatabaseMigrationName),
	}
	return fmt.Sprintf("Sql Virtual Machine Providers 2 Database Migration (%s)", strings.Join(components, "\n"))
}
diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/id_sqlvirtualmachineproviders2databasemigration_test.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/id_sqlvirtualmachineproviders2databasemigration_test.go
new file mode 100644
index 00000000000..7b3082309d4
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/id_sqlvirtualmachineproviders2databasemigration_test.go
@@ -0,0 +1,357 @@
package databasemigrationssqlvm

import (
	"testing"

	"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// compile-time assertion that the ID type implements resourceids.ResourceId
var _ resourceids.ResourceId = &SqlVirtualMachineProviders2DatabaseMigrationId{}

// TestNewSqlVirtualMachineProviders2DatabaseMigrationID checks the constructor assigns each segment to the right field.
func TestNewSqlVirtualMachineProviders2DatabaseMigrationID(t *testing.T) {
	id := NewSqlVirtualMachineProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlVirtualMachineName", "databaseMigrationName")

	if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" {
		t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012")
	}

	if id.ResourceGroupName != "example-resource-group" {
		t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group")
	}

	if id.SqlVirtualMachineName != "sqlVirtualMachineName" {
		t.Fatalf("Expected %q but got %q for Segment 'SqlVirtualMachineName'", id.SqlVirtualMachineName, "sqlVirtualMachineName")
	}

	if id.DatabaseMigrationName != "databaseMigrationName" {
		t.Fatalf("Expected %q but got %q for Segment 'DatabaseMigrationName'", id.DatabaseMigrationName, "databaseMigrationName")
	}
}

// TestFormatSqlVirtualMachineProviders2DatabaseMigrationID checks ID() renders the canonical Resource ID string.
func TestFormatSqlVirtualMachineProviders2DatabaseMigrationID(t *testing.T) {
	actual := NewSqlVirtualMachineProviders2DatabaseMigrationID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlVirtualMachineName", "databaseMigrationName").ID()
	expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName"
	if actual != expected {
		t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual)
	}
}

// TestParseSqlVirtualMachineProviders2DatabaseMigrationID exercises the case-sensitive parser
// with every URI truncation, the full valid URI, and a valid URI with a trailing extra segment.
func TestParseSqlVirtualMachineProviders2DatabaseMigrationID(t *testing.T) {
	testData := []struct {
		Input    string
		Error    bool
		Expected *SqlVirtualMachineProviders2DatabaseMigrationId
	}{
		{
			// Incomplete URI
			Input: "",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations",
			Error: true,
		},
		{
			// Valid URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName",
			Expected: &SqlVirtualMachineProviders2DatabaseMigrationId{
				SubscriptionId:        "12345678-1234-9876-4563-123456789012",
				ResourceGroupName:     "example-resource-group",
				SqlVirtualMachineName: "sqlVirtualMachineName",
				DatabaseMigrationName: "databaseMigrationName",
			},
		},
		{
			// Invalid (Valid Uri with Extra segment)
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName/extra",
			Error: true,
		},
	}
	for _, v := range testData {
		t.Logf("[DEBUG] Testing %q", v.Input)

		actual, err := ParseSqlVirtualMachineProviders2DatabaseMigrationID(v.Input)
		if err != nil {
			if v.Error {
				continue
			}

			t.Fatalf("Expect a value but got an error: %+v", err)
		}
		if v.Error {
			t.Fatal("Expect an error but didn't get one")
		}

		if actual.SubscriptionId != v.Expected.SubscriptionId {
			t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId)
		}

		if actual.ResourceGroupName != v.Expected.ResourceGroupName {
			t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName)
		}

		if actual.SqlVirtualMachineName != v.Expected.SqlVirtualMachineName {
			t.Fatalf("Expected %q but got %q for SqlVirtualMachineName", v.Expected.SqlVirtualMachineName, actual.SqlVirtualMachineName)
		}

		if actual.DatabaseMigrationName != v.Expected.DatabaseMigrationName {
			t.Fatalf("Expected %q but got %q for DatabaseMigrationName", v.Expected.DatabaseMigrationName, actual.DatabaseMigrationName)
		}

	}
}

// TestParseSqlVirtualMachineProviders2DatabaseMigrationIDInsensitively repeats the parse matrix,
// interleaving mIxEd-CaSe variants that the insensitive parser must also handle.
func TestParseSqlVirtualMachineProviders2DatabaseMigrationIDInsensitively(t *testing.T) {
	testData := []struct {
		Input    string
		Error    bool
		Expected *SqlVirtualMachineProviders2DatabaseMigrationId
	}{
		{
			// Incomplete URI
			Input: "",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions",
			Error: true,
		},
		{
			// Incomplete URI (mIxEd CaSe since this is insensitive)
			Input: "/sUbScRiPtIoNs",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012",
			Error: true,
		},
		{
			// Incomplete URI (mIxEd CaSe since this is insensitive)
			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups",
			Error: true,
		},
		{
			// Incomplete URI (mIxEd CaSe since this is insensitive)
			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group",
			Error: true,
		},
		{
			// Incomplete URI (mIxEd CaSe since this is insensitive)
			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers",
			Error: true,
		},
		{
			// Incomplete URI (mIxEd CaSe since this is insensitive)
			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine",
			Error: true,
		},
		{
			// Incomplete URI (mIxEd CaSe since this is insensitive)
			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines",
			Error: true,
		},
		{
			// Incomplete URI (mIxEd CaSe since this is insensitive)
			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName",
			Error: true,
		},
		{
			// Incomplete URI (mIxEd CaSe since this is insensitive)
			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS/sQlViRtUaLmAcHiNeNaMe",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers",
			Error: true,
		},
		{
			// Incomplete URI (mIxEd CaSe since this is insensitive)
			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS/sQlViRtUaLmAcHiNeNaMe/pRoViDeRs",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration",
			Error: true,
		},
		{
			// Incomplete URI (mIxEd CaSe since this is insensitive)
			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS/sQlViRtUaLmAcHiNeNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn",
			Error: true,
		},
		{
			// Incomplete URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations",
			Error: true,
		},
		{
			// Incomplete URI (mIxEd CaSe since this is insensitive)
			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS/sQlViRtUaLmAcHiNeNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS",
			Error: true,
		},
		{
			// Valid URI
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName",
			Expected: &SqlVirtualMachineProviders2DatabaseMigrationId{
				SubscriptionId:        "12345678-1234-9876-4563-123456789012",
				ResourceGroupName:     "example-resource-group",
				SqlVirtualMachineName: "sqlVirtualMachineName",
				DatabaseMigrationName: "databaseMigrationName",
			},
		},
		{
			// Invalid (Valid Uri with Extra segment)
			Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/sqlVirtualMachineName/providers/Microsoft.DataMigration/databaseMigrations/databaseMigrationName/extra",
			Error: true,
		},
		{
			// Valid URI (mIxEd CaSe since this is insensitive)
			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS/sQlViRtUaLmAcHiNeNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS/dAtAbAsEmIgRaTiOnNaMe",
			Expected: &SqlVirtualMachineProviders2DatabaseMigrationId{
				SubscriptionId:        "12345678-1234-9876-4563-123456789012",
				ResourceGroupName:     "eXaMpLe-rEsOuRcE-GrOuP",
				SqlVirtualMachineName: "sQlViRtUaLmAcHiNeNaMe",
				DatabaseMigrationName: "dAtAbAsEmIgRaTiOnNaMe",
			},
		},
		{
			// Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive)
			Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.sQlViRtUaLmAcHiNe/sQlViRtUaLmAcHiNeS/sQlViRtUaLmAcHiNeNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/dAtAbAsEmIgRaTiOnS/dAtAbAsEmIgRaTiOnNaMe/extra",
			Error: true,
		},
	}
	for _, v := range testData {
		t.Logf("[DEBUG] Testing %q", v.Input)

		actual, err := ParseSqlVirtualMachineProviders2DatabaseMigrationIDInsensitively(v.Input)
		if err != nil {
			if v.Error {
				continue
			}

			t.Fatalf("Expect a value but got an error: %+v", err)
		}
		if v.Error {
			t.Fatal("Expect an error but didn't get one")
		}

		if actual.SubscriptionId != v.Expected.SubscriptionId {
			t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId)
		}

		if actual.ResourceGroupName != v.Expected.ResourceGroupName {
			t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName)
		}

		if actual.SqlVirtualMachineName != v.Expected.SqlVirtualMachineName {
			t.Fatalf("Expected %q but got %q for SqlVirtualMachineName", v.Expected.SqlVirtualMachineName, actual.SqlVirtualMachineName)
		}

		if actual.DatabaseMigrationName != v.Expected.DatabaseMigrationName {
			t.Fatalf("Expected %q but got %q for DatabaseMigrationName", v.Expected.DatabaseMigrationName, actual.DatabaseMigrationName)
		}

	}
}

// TestSegmentsForSqlVirtualMachineProviders2DatabaseMigrationId ensures the segment list is non-empty and has unique names.
func TestSegmentsForSqlVirtualMachineProviders2DatabaseMigrationId(t *testing.T) {
	segments := SqlVirtualMachineProviders2DatabaseMigrationId{}.Segments()
	if len(segments) == 0 {
		t.Fatalf("SqlVirtualMachineProviders2DatabaseMigrationId has no segments")
	}

	uniqueNames := make(map[string]struct{}, 0)
	for _, segment := range segments {
		uniqueNames[segment.Name] = struct{}{}
	}
	if len(uniqueNames) != len(segments) {
		t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments))
	}
}
diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/method_delete.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/method_delete.go
new file mode 100644
index 00000000000..140a74085dc
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/method_delete.go
@@ -0,0 +1,101 @@
package databasemigrationssqlvm

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
	"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// DeleteOperationResponse is the response for the long-running Delete operation.
type DeleteOperationResponse struct {
	Poller       pollers.Poller
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *DatabaseMigrationSqlVM
}

// DeleteOperationOptions holds the optional query parameters for Delete.
type DeleteOperationOptions struct {
	Force *bool
}

// DefaultDeleteOperationOptions returns an empty set of options (no 'force' parameter sent).
func DefaultDeleteOperationOptions() DeleteOperationOptions {
	return DeleteOperationOptions{}
}

func (o DeleteOperationOptions) ToHeaders() *client.Headers {
	out := client.Headers{}

	return &out
}

func (o DeleteOperationOptions) ToOData() *odata.Query {
	out := odata.Query{}

	return &out
}

// ToQuery maps the options onto query-string parameters; only 'force' is supported.
func (o DeleteOperationOptions) ToQuery() *client.QueryParams {
	out := client.QueryParams{}
	if o.Force != nil {
		out.Append("force", fmt.Sprintf("%v", *o.Force))
	}
	return &out
}

// Delete ...
func (c DatabaseMigrationsSqlVMClient) Delete(ctx context.Context, id SqlVirtualMachineProviders2DatabaseMigrationId, options DeleteOperationOptions) (result DeleteOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusAccepted,
			http.StatusNoContent,
			http.StatusOK,
		},
		HttpMethod:    http.MethodDelete,
		OptionsObject: options,
		Path:          id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		// capture the raw response/OData even on error, for caller diagnostics
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client)
	if err != nil {
		return
	}

	return
}

// DeleteThenPoll performs Delete then polls until it's completed
func (c DatabaseMigrationsSqlVMClient) DeleteThenPoll(ctx context.Context, id SqlVirtualMachineProviders2DatabaseMigrationId, options DeleteOperationOptions) error {
	result, err := c.Delete(ctx, id, options)
	if err != nil {
		return fmt.Errorf("performing Delete: %+v", err)
	}

	if err := result.Poller.PollUntilDone(ctx); err != nil {
		return fmt.Errorf("polling after Delete: %+v", err)
	}

	return nil
}
diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_azureblob.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_azureblob.go
new file mode 100644
index 00000000000..cd88e38fd23
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_azureblob.go
@@ -0,0 +1,16 @@
package databasemigrationssqlvm

import (
	"github.com/hashicorp/go-azure-helpers/resourcemanager/identity"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// AzureBlob describes an Azure Blob Storage location used during migration.
type AzureBlob struct {
	AccountKey               *string                                  `json:"accountKey,omitempty"`
	AuthType                 *AuthType                                `json:"authType,omitempty"`
	BlobContainerName        *string                                  `json:"blobContainerName,omitempty"`
	Identity                 *identity.LegacySystemAndUserAssignedMap `json:"identity,omitempty"`
	StorageAccountResourceId *string                                  `json:"storageAccountResourceId,omitempty"`
}
diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_backupconfiguration.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_backupconfiguration.go
new file mode 100644
index 00000000000..4bb78860dcf
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_backupconfiguration.go
@@ -0,0 +1,9 @@
package databasemigrationssqlvm

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// BackupConfiguration pairs the backup source and restore target locations.
type BackupConfiguration struct {
	SourceLocation *SourceLocation `json:"sourceLocation,omitempty"`
	TargetLocation *TargetLocation `json:"targetLocation,omitempty"`
}
diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_copyprogressdetails.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_copyprogressdetails.go
new file mode 100644
index 00000000000..435ac43d61f
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_copyprogressdetails.go
@@ -0,0 +1,36 @@
package databasemigrationssqlvm

import (
	"time"

	"github.com/hashicorp/go-azure-helpers/lang/dates"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// CopyProgressDetails reports per-table copy progress for a migration.
type CopyProgressDetails struct {
	CopyDuration       *int64   `json:"copyDuration,omitempty"`
	CopyStart          *string  `json:"copyStart,omitempty"`
	CopyThroughput     *float64 `json:"copyThroughput,omitempty"`
	DataRead           *int64   `json:"dataRead,omitempty"`
	DataWritten        *int64   `json:"dataWritten,omitempty"`
	ParallelCopyType   *string  `json:"parallelCopyType,omitempty"`
	RowsCopied         *int64   `json:"rowsCopied,omitempty"`
	RowsRead           *int64   `json:"rowsRead,omitempty"`
	Status             *string  `json:"status,omitempty"`
	TableName          *string  `json:"tableName,omitempty"`
	UsedParallelCopies *int64   `json:"usedParallelCopies,omitempty"`
}

// GetCopyStartAsTime parses CopyStart using the RFC3339 layout; returns nil when unset.
func (o *CopyProgressDetails) GetCopyStartAsTime() (*time.Time, error) {
	if o.CopyStart == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.CopyStart, "2006-01-02T15:04:05Z07:00")
}

// SetCopyStartAsTime formats the given instant using the RFC3339 layout.
func (o *CopyProgressDetails) SetCopyStartAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.CopyStart = &formatted
}
diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationbaseproperties.go
b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationbaseproperties.go
new file mode 100644
index 00000000000..b53195de9c1
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationbaseproperties.go
@@ -0,0 +1,92 @@
package databasemigrationssqlvm

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// DatabaseMigrationBaseProperties is the discriminated-union interface for migration properties,
// discriminated on the JSON "kind" field.
type DatabaseMigrationBaseProperties interface {
	DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl
}

var _ DatabaseMigrationBaseProperties = BaseDatabaseMigrationBasePropertiesImpl{}

// BaseDatabaseMigrationBasePropertiesImpl carries the fields shared by every concrete kind.
type BaseDatabaseMigrationBasePropertiesImpl struct {
	EndedOn               *string            `json:"endedOn,omitempty"`
	Kind                  ResourceType       `json:"kind"`
	MigrationFailureError *ErrorInfo         `json:"migrationFailureError,omitempty"`
	MigrationOperationId  *string            `json:"migrationOperationId,omitempty"`
	MigrationService      *string            `json:"migrationService,omitempty"`
	MigrationStatus       *string            `json:"migrationStatus,omitempty"`
	ProvisioningError     *string            `json:"provisioningError,omitempty"`
	ProvisioningState     *ProvisioningState `json:"provisioningState,omitempty"`
	Scope                 *string            `json:"scope,omitempty"`
	StartedOn             *string            `json:"startedOn,omitempty"`
}

func (s BaseDatabaseMigrationBasePropertiesImpl) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl {
	return s
}

var _ DatabaseMigrationBaseProperties = RawDatabaseMigrationBasePropertiesImpl{}

// RawDatabaseMigrationBasePropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawDatabaseMigrationBasePropertiesImpl struct {
	databaseMigrationBaseProperties BaseDatabaseMigrationBasePropertiesImpl
	Type                            string
	Values                          map[string]interface{}
}

func (s RawDatabaseMigrationBasePropertiesImpl) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl {
	return s.databaseMigrationBaseProperties
}

// UnmarshalDatabaseMigrationBasePropertiesImplementation decodes 'input' into the concrete type
// selected by the "kind" discriminator, falling back to the Raw wrapper for unknown kinds.
func UnmarshalDatabaseMigrationBasePropertiesImplementation(input []byte) (DatabaseMigrationBaseProperties, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling DatabaseMigrationBaseProperties into map[string]interface: %+v", err)
	}

	var value string
	if v, ok := temp["kind"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "DatabaseMigrationProperties") {
		var out DatabaseMigrationProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into DatabaseMigrationProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "MongoToCosmosDbMongo") {
		var out DatabaseMigrationPropertiesCosmosDbMongo
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into DatabaseMigrationPropertiesCosmosDbMongo: %+v", err)
		}
		return out, nil
	}

	// unknown discriminator: preserve the shared fields plus the raw payload
	var parent BaseDatabaseMigrationBasePropertiesImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseDatabaseMigrationBasePropertiesImpl: %+v", err)
	}

	return RawDatabaseMigrationBasePropertiesImpl{
		databaseMigrationBaseProperties: parent,
		Type:                            value,
		Values:                          temp,
	}, nil

}
diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationproperties.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationproperties.go
new file mode 100644
index 00000000000..024751ce68d
--- /dev/null
+++
b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationproperties.go
@@ -0,0 +1,98 @@
package databasemigrationssqlvm

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/hashicorp/go-azure-helpers/lang/dates"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ DatabaseMigrationBaseProperties = DatabaseMigrationProperties{}

// DatabaseMigrationProperties is the concrete kind "DatabaseMigrationProperties" of the
// DatabaseMigrationBaseProperties discriminated union.
type DatabaseMigrationProperties struct {
	SourceDatabaseName      *string                   `json:"sourceDatabaseName,omitempty"`
	SourceServerName        *string                   `json:"sourceServerName,omitempty"`
	SourceSqlConnection     *SqlConnectionInformation `json:"sourceSqlConnection,omitempty"`
	TargetDatabaseCollation *string                   `json:"targetDatabaseCollation,omitempty"`

	// Fields inherited from DatabaseMigrationBaseProperties

	EndedOn               *string            `json:"endedOn,omitempty"`
	Kind                  ResourceType       `json:"kind"`
	MigrationFailureError *ErrorInfo         `json:"migrationFailureError,omitempty"`
	MigrationOperationId  *string            `json:"migrationOperationId,omitempty"`
	MigrationService      *string            `json:"migrationService,omitempty"`
	MigrationStatus       *string            `json:"migrationStatus,omitempty"`
	ProvisioningError     *string            `json:"provisioningError,omitempty"`
	ProvisioningState     *ProvisioningState `json:"provisioningState,omitempty"`
	Scope                 *string            `json:"scope,omitempty"`
	StartedOn             *string            `json:"startedOn,omitempty"`
}

// DatabaseMigrationBaseProperties copies the shared fields into the base implementation.
func (s DatabaseMigrationProperties) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl {
	return BaseDatabaseMigrationBasePropertiesImpl{
		EndedOn:               s.EndedOn,
		Kind:                  s.Kind,
		MigrationFailureError: s.MigrationFailureError,
		MigrationOperationId:  s.MigrationOperationId,
		MigrationService:      s.MigrationService,
		MigrationStatus:       s.MigrationStatus,
		ProvisioningError:     s.ProvisioningError,
		ProvisioningState:     s.ProvisioningState,
		Scope:                 s.Scope,
		StartedOn:             s.StartedOn,
	}
}

// GetEndedOnAsTime parses EndedOn using the RFC3339 layout; returns nil when unset.
func (o *DatabaseMigrationProperties) GetEndedOnAsTime() (*time.Time, error) {
	if o.EndedOn == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00")
}

func (o *DatabaseMigrationProperties) SetEndedOnAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.EndedOn = &formatted
}

// GetStartedOnAsTime parses StartedOn using the RFC3339 layout; returns nil when unset.
func (o *DatabaseMigrationProperties) GetStartedOnAsTime() (*time.Time, error) {
	if o.StartedOn == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00")
}

func (o *DatabaseMigrationProperties) SetStartedOnAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.StartedOn = &formatted
}

var _ json.Marshaler = DatabaseMigrationProperties{}

// MarshalJSON injects the fixed "kind" discriminator into the encoded payload.
func (s DatabaseMigrationProperties) MarshalJSON() ([]byte, error) {
	type wrapper DatabaseMigrationProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling DatabaseMigrationProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling DatabaseMigrationProperties: %+v", err)
	}

	decoded["kind"] = "DatabaseMigrationProperties"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling DatabaseMigrationProperties: %+v", err)
	}

	return encoded, nil
}
diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationpropertiescosmosdbmongo.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationpropertiescosmosdbmongo.go
new file mode 100644
index 00000000000..ac001e80637
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationpropertiescosmosdbmongo.go
@@ -0,0 +1,97 @@
package databasemigrationssqlvm

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/hashicorp/go-azure-helpers/lang/dates"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesCosmosDbMongo{}

// DatabaseMigrationPropertiesCosmosDbMongo is the concrete kind "MongoToCosmosDbMongo" of the
// DatabaseMigrationBaseProperties discriminated union.
type DatabaseMigrationPropertiesCosmosDbMongo struct {
	CollectionList        *[]MongoMigrationCollection `json:"collectionList,omitempty"`
	SourceMongoConnection *MongoConnectionInformation `json:"sourceMongoConnection,omitempty"`
	TargetMongoConnection *MongoConnectionInformation `json:"targetMongoConnection,omitempty"`

	// Fields inherited from DatabaseMigrationBaseProperties

	EndedOn               *string            `json:"endedOn,omitempty"`
	Kind                  ResourceType       `json:"kind"`
	MigrationFailureError *ErrorInfo         `json:"migrationFailureError,omitempty"`
	MigrationOperationId  *string            `json:"migrationOperationId,omitempty"`
	MigrationService      *string            `json:"migrationService,omitempty"`
	MigrationStatus       *string            `json:"migrationStatus,omitempty"`
	ProvisioningError     *string            `json:"provisioningError,omitempty"`
	ProvisioningState     *ProvisioningState `json:"provisioningState,omitempty"`
	Scope                 *string            `json:"scope,omitempty"`
	StartedOn             *string            `json:"startedOn,omitempty"`
}

// DatabaseMigrationBaseProperties copies the shared fields into the base implementation.
func (s DatabaseMigrationPropertiesCosmosDbMongo) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl {
	return BaseDatabaseMigrationBasePropertiesImpl{
		EndedOn:               s.EndedOn,
		Kind:                  s.Kind,
		MigrationFailureError: s.MigrationFailureError,
		MigrationOperationId:  s.MigrationOperationId,
		MigrationService:      s.MigrationService,
		MigrationStatus:       s.MigrationStatus,
		ProvisioningError:     s.ProvisioningError,
		ProvisioningState:     s.ProvisioningState,
		Scope:                 s.Scope,
		StartedOn:             s.StartedOn,
	}
}

// GetEndedOnAsTime parses EndedOn using the RFC3339 layout; returns nil when unset.
func (o *DatabaseMigrationPropertiesCosmosDbMongo) GetEndedOnAsTime() (*time.Time, error) {
	if o.EndedOn == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00")
}

func (o *DatabaseMigrationPropertiesCosmosDbMongo) SetEndedOnAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.EndedOn = &formatted
}

// GetStartedOnAsTime parses StartedOn using the RFC3339 layout; returns nil when unset.
func (o *DatabaseMigrationPropertiesCosmosDbMongo) GetStartedOnAsTime() (*time.Time, error) {
	if o.StartedOn == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00")
}

func (o *DatabaseMigrationPropertiesCosmosDbMongo) SetStartedOnAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.StartedOn = &formatted
}

var _ json.Marshaler = DatabaseMigrationPropertiesCosmosDbMongo{}

// MarshalJSON injects the fixed "kind" discriminator ("MongoToCosmosDbMongo") into the encoded payload.
func (s DatabaseMigrationPropertiesCosmosDbMongo) MarshalJSON() ([]byte, error) {
	type wrapper DatabaseMigrationPropertiesCosmosDbMongo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling DatabaseMigrationPropertiesCosmosDbMongo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesCosmosDbMongo: %+v", err)
	}

	decoded["kind"] = "MongoToCosmosDbMongo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesCosmosDbMongo: %+v", err)
	}

	return encoded, nil
}
diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationpropertiessqldb.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationpropertiessqldb.go
new file mode 100644
index 00000000000..0420e0172fd
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationpropertiessqldb.go
@@ -0,0 +1,71 @@
package databasemigrationssqlvm

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesSqlDb{} + +type DatabaseMigrationPropertiesSqlDb struct { + MigrationStatusDetails *SqlDbMigrationStatusDetails `json:"migrationStatusDetails,omitempty"` + OfflineConfiguration *SqlDbOfflineConfiguration `json:"offlineConfiguration,omitempty"` + TableList *[]string `json:"tableList,omitempty"` + TargetSqlConnection *SqlConnectionInformation `json:"targetSqlConnection,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationPropertiesSqlDb) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +var _ json.Marshaler = DatabaseMigrationPropertiesSqlDb{} + +func (s DatabaseMigrationPropertiesSqlDb) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesSqlDb + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling DatabaseMigrationPropertiesSqlDb: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesSqlDb: %+v", err) + } + + decoded["kind"] = "SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesSqlDb: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationpropertiessqlmi.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationpropertiessqlmi.go new file mode 100644 index 00000000000..a44b64ceb87 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationpropertiessqlmi.go @@ -0,0 +1,70 @@ +package databasemigrationssqlvm + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesSqlMi{} + +type DatabaseMigrationPropertiesSqlMi struct { + BackupConfiguration *BackupConfiguration `json:"backupConfiguration,omitempty"` + MigrationStatusDetails *MigrationStatusDetails `json:"migrationStatusDetails,omitempty"` + OfflineConfiguration *OfflineConfiguration `json:"offlineConfiguration,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationPropertiesSqlMi) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +var _ json.Marshaler = DatabaseMigrationPropertiesSqlMi{} + +func (s DatabaseMigrationPropertiesSqlMi) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesSqlMi + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationPropertiesSqlMi: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != 
nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesSqlMi: %+v", err) + } + + decoded["kind"] = "SqlMi" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesSqlMi: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationpropertiessqlvm.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationpropertiessqlvm.go new file mode 100644 index 00000000000..b929e3363d1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationpropertiessqlvm.go @@ -0,0 +1,70 @@ +package databasemigrationssqlvm + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesSqlVM{} + +type DatabaseMigrationPropertiesSqlVM struct { + BackupConfiguration *BackupConfiguration `json:"backupConfiguration,omitempty"` + MigrationStatusDetails *MigrationStatusDetails `json:"migrationStatusDetails,omitempty"` + OfflineConfiguration *OfflineConfiguration `json:"offlineConfiguration,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + 
+func (s DatabaseMigrationPropertiesSqlVM) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +var _ json.Marshaler = DatabaseMigrationPropertiesSqlVM{} + +func (s DatabaseMigrationPropertiesSqlVM) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesSqlVM + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationPropertiesSqlVM: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesSqlVM: %+v", err) + } + + decoded["kind"] = "SqlVm" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesSqlVM: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationsqlvm.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationsqlvm.go new file mode 100644 index 00000000000..62989786fcb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_databasemigrationsqlvm.go @@ -0,0 +1,16 @@ +package databasemigrationssqlvm + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseMigrationSqlVM struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DatabaseMigrationPropertiesSqlVM `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_errorinfo.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_errorinfo.go new file mode 100644 index 00000000000..ae920ad129f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_errorinfo.go @@ -0,0 +1,9 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ErrorInfo struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_migrationstatusdetails.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_migrationstatusdetails.go new file mode 100644 index 00000000000..e93843d3b4d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_migrationstatusdetails.go @@ -0,0 +1,20 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationStatusDetails struct { + ActiveBackupSets *[]SqlBackupSetInfo `json:"activeBackupSets,omitempty"` + BlobContainerName *string `json:"blobContainerName,omitempty"` + CompleteRestoreErrorMessage *string `json:"completeRestoreErrorMessage,omitempty"` + CurrentRestoringFilename *string `json:"currentRestoringFilename,omitempty"` + FileUploadBlockingErrors *[]string `json:"fileUploadBlockingErrors,omitempty"` + FullBackupSetInfo *SqlBackupSetInfo `json:"fullBackupSetInfo,omitempty"` + InvalidFiles *[]string `json:"invalidFiles,omitempty"` + IsFullBackupRestored *bool `json:"isFullBackupRestored,omitempty"` + LastRestoredBackupSetInfo *SqlBackupSetInfo `json:"lastRestoredBackupSetInfo,omitempty"` + LastRestoredFilename *string `json:"lastRestoredFilename,omitempty"` + MigrationState *string `json:"migrationState,omitempty"` + PendingLogBackupsCount *int64 `json:"pendingLogBackupsCount,omitempty"` + RestoreBlockingReason *string `json:"restoreBlockingReason,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_mongoconnectioninformation.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_mongoconnectioninformation.go new file mode 100644 index 00000000000..61db74e0cb1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_mongoconnectioninformation.go @@ -0,0 +1,13 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoConnectionInformation struct { + ConnectionString *string `json:"connectionString,omitempty"` + Host *string `json:"host,omitempty"` + Password *string `json:"password,omitempty"` + Port *int64 `json:"port,omitempty"` + UseSsl *bool `json:"useSsl,omitempty"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_mongomigrationcollection.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_mongomigrationcollection.go new file mode 100644 index 00000000000..dfb00b42b3c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_mongomigrationcollection.go @@ -0,0 +1,12 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoMigrationCollection struct { + MigrationProgressDetails *MongoMigrationProgressDetails `json:"migrationProgressDetails,omitempty"` + SourceCollection *string `json:"sourceCollection,omitempty"` + SourceDatabase *string `json:"sourceDatabase,omitempty"` + TargetCollection *string `json:"targetCollection,omitempty"` + TargetDatabase *string `json:"targetDatabase,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_mongomigrationprogressdetails.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_mongomigrationprogressdetails.go new file mode 100644 index 00000000000..20f30b4a221 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_mongomigrationprogressdetails.go @@ -0,0 +1,12 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoMigrationProgressDetails struct { + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + MigrationError *string `json:"migrationError,omitempty"` + MigrationStatus *MongoMigrationStatus `json:"migrationStatus,omitempty"` + ProcessedDocumentCount *int64 `json:"processedDocumentCount,omitempty"` + SourceDocumentCount *int64 `json:"sourceDocumentCount,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_offlineconfiguration.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_offlineconfiguration.go new file mode 100644 index 00000000000..d13fe15e50e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_offlineconfiguration.go @@ -0,0 +1,9 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type OfflineConfiguration struct { + LastBackupName *string `json:"lastBackupName,omitempty"` + Offline *bool `json:"offline,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sourcelocation.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sourcelocation.go new file mode 100644 index 00000000000..573a91e0014 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sourcelocation.go @@ -0,0 +1,10 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SourceLocation struct { + AzureBlob *AzureBlob `json:"azureBlob,omitempty"` + FileShare *SqlFileShare `json:"fileShare,omitempty"` + FileStorageType *string `json:"fileStorageType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqlbackupfileinfo.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqlbackupfileinfo.go new file mode 100644 index 00000000000..c4fef475e07 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqlbackupfileinfo.go @@ -0,0 +1,15 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlBackupFileInfo struct { + CopyDuration *int64 `json:"copyDuration,omitempty"` + CopyThroughput *float64 `json:"copyThroughput,omitempty"` + DataRead *int64 `json:"dataRead,omitempty"` + DataWritten *int64 `json:"dataWritten,omitempty"` + FamilySequenceNumber *int64 `json:"familySequenceNumber,omitempty"` + FileName *string `json:"fileName,omitempty"` + Status *string `json:"status,omitempty"` + TotalSize *int64 `json:"totalSize,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqlbackupsetinfo.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqlbackupsetinfo.go new file mode 100644 index 00000000000..bb22c4ed603 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqlbackupsetinfo.go @@ -0,0 +1,48 @@ +package databasemigrationssqlvm + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlBackupSetInfo struct { + BackupFinishDate *string `json:"backupFinishDate,omitempty"` + BackupSetId *string `json:"backupSetId,omitempty"` + BackupStartDate *string `json:"backupStartDate,omitempty"` + BackupType *string `json:"backupType,omitempty"` + FamilyCount *int64 `json:"familyCount,omitempty"` + FirstLSN *string `json:"firstLSN,omitempty"` + HasBackupChecksums *bool `json:"hasBackupChecksums,omitempty"` + IgnoreReasons *[]string `json:"ignoreReasons,omitempty"` + IsBackupRestored *bool `json:"isBackupRestored,omitempty"` + LastLSN *string `json:"lastLSN,omitempty"` + ListOfBackupFiles *[]SqlBackupFileInfo `json:"listOfBackupFiles,omitempty"` +} + +func (o *SqlBackupSetInfo) GetBackupFinishDateAsTime() (*time.Time, error) { + if o.BackupFinishDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *SqlBackupSetInfo) SetBackupFinishDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishDate = &formatted +} + +func (o *SqlBackupSetInfo) GetBackupStartDateAsTime() (*time.Time, error) { + if o.BackupStartDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupStartDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *SqlBackupSetInfo) SetBackupStartDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupStartDate = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqlconnectioninformation.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqlconnectioninformation.go new file mode 100644 index 00000000000..d2b3db31624 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqlconnectioninformation.go @@ -0,0 +1,13 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type SqlConnectionInformation struct { + Authentication *string `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Password *string `json:"password,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqldbmigrationstatusdetails.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqldbmigrationstatusdetails.go new file mode 100644 index 00000000000..8bdff65a96f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqldbmigrationstatusdetails.go @@ -0,0 +1,10 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlDbMigrationStatusDetails struct { + ListOfCopyProgressDetails *[]CopyProgressDetails `json:"listOfCopyProgressDetails,omitempty"` + MigrationState *string `json:"migrationState,omitempty"` + SqlDataCopyErrors *[]string `json:"sqlDataCopyErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqldbofflineconfiguration.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqldbofflineconfiguration.go new file mode 100644 index 00000000000..11f16c4af35 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqldbofflineconfiguration.go @@ -0,0 +1,8 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlDbOfflineConfiguration struct { + Offline *bool `json:"offline,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqlfileshare.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqlfileshare.go new file mode 100644 index 00000000000..4dc830e24aa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_sqlfileshare.go @@ -0,0 +1,10 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlFileShare struct { + Password *string `json:"password,omitempty"` + Path *string `json:"path,omitempty"` + Username *string `json:"username,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_targetlocation.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_targetlocation.go new file mode 100644 index 00000000000..4cd7621d55e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/model_targetlocation.go @@ -0,0 +1,9 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TargetLocation struct { + AccountKey *string `json:"accountKey,omitempty"` + StorageAccountResourceId *string `json:"storageAccountResourceId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/version.go b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/version.go new file mode 100644 index 00000000000..a716d58b4b6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/databasemigrationssqlvm/version.go @@ -0,0 +1,10 @@ +package databasemigrationssqlvm + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/databasemigrationssqlvm/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/delete/client.go b/resource-manager/datamigration/2025-06-30/delete/client.go new file mode 100644 index 00000000000..a151c0362da --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/client.go @@ -0,0 +1,26 @@ +package delete + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DELETEClient struct { + Client *resourcemanager.Client +} + +func NewDELETEClientWithBaseURI(sdkApi sdkEnv.Api) (*DELETEClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "delete", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating DELETEClient: %+v", err) + } + + return &DELETEClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/delete/id_file.go b/resource-manager/datamigration/2025-06-30/delete/id_file.go new file mode 100644 index 00000000000..55985ef3f00 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/id_file.go @@ -0,0 +1,148 @@ +package delete + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&FileId{}) +} + +var _ resourceids.ResourceId = &FileId{} + +// FileId is a struct representing the Resource ID for a File +type FileId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + FileName string +} + +// NewFileID returns a new FileId struct +func NewFileID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, fileName string) FileId { + return FileId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + FileName: fileName, + } +} + +// ParseFileID parses 'input' into a FileId +func ParseFileID(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseFileIDInsensitively parses 'input' case-insensitively into a FileId +// note: this method should only be used for API response data and not user input +func ParseFileIDInsensitively(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *FileId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if 
id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.FileName, ok = input.Parsed["fileName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "fileName", input) + } + + return nil +} + +// ValidateFileID checks that 'input' can be parsed as a File ID +func ValidateFileID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseFileID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted File ID +func (id FileId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/files/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.FileName) +} + +// Segments returns a slice of Resource ID Segments which comprise this File ID +func (id FileId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + 
resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticFiles", "files", "files"), + resourceids.UserSpecifiedSegment("fileName", "fileName"), + } +} + +// String returns a human-readable description of this File ID +func (id FileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("File Name: %q", id.FileName), + } + return fmt.Sprintf("File (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/delete/id_file_test.go b/resource-manager/datamigration/2025-06-30/delete/id_file_test.go new file mode 100644 index 00000000000..4505477977f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/id_file_test.go @@ -0,0 +1,372 @@ +package delete + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &FileId{} + +func TestNewFileID(t *testing.T) { + id := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.FileName != "fileName" { + t.Fatalf("Expected %q but got %q for Segment 'FileName'", id.FileName, "fileName") + } +} + +func TestFormatFileID(t *testing.T) { + actual := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseFileID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestParseFileIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + FileName: "fIlEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestSegmentsForFileId(t *testing.T) { + segments := FileId{}.Segments() + if len(segments) == 0 { + t.Fatalf("FileId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/delete/id_project.go b/resource-manager/datamigration/2025-06-30/delete/id_project.go new file mode 100644 index 00000000000..c843bcfe4cd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/id_project.go @@ -0,0 +1,139 @@ +package delete + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ProjectId{}) +} + +var _ resourceids.ResourceId = &ProjectId{} + +// ProjectId is a struct representing the Resource ID for a Project +type ProjectId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string +} + +// NewProjectID returns a new ProjectId struct +func NewProjectID(subscriptionId string, resourceGroupName string, serviceName string, projectName string) ProjectId { + return ProjectId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + } +} + +// ParseProjectID parses 'input' into a ProjectId +func ParseProjectID(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseProjectIDInsensitively parses 'input' case-insensitively into a ProjectId +// note: this method should only be used for API response data and not user input +func ParseProjectIDInsensitively(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ProjectId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + 
if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + return nil +} + +// ValidateProjectID checks that 'input' can be parsed as a Project ID +func ValidateProjectID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseProjectID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Project ID +func (id ProjectId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Project ID +func (id ProjectId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + } +} + +// String 
returns a human-readable description of this Project ID +func (id ProjectId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + } + return fmt.Sprintf("Project (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/delete/id_project_test.go b/resource-manager/datamigration/2025-06-30/delete/id_project_test.go new file mode 100644 index 00000000000..2ee87f3faec --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/id_project_test.go @@ -0,0 +1,327 @@ +package delete + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &ProjectId{} + +func TestNewProjectID(t *testing.T) { + id := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } +} + +func TestFormatProjectID(t *testing.T) { + actual := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", 
"projectName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseProjectID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestParseProjectIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + 
Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestSegmentsForProjectId(t *testing.T) { + segments := ProjectId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ProjectId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/delete/id_service.go b/resource-manager/datamigration/2025-06-30/delete/id_service.go new file mode 100644 index 00000000000..fe66bd222fd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/id_service.go @@ -0,0 +1,130 @@ +package delete + +import ( + "fmt" + "strings" + + 
"github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&ServiceId{}) +} + +var _ resourceids.ResourceId = &ServiceId{} + +// ServiceId is a struct representing the Resource ID for a Service +type ServiceId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string +} + +// NewServiceID returns a new ServiceId struct +func NewServiceID(subscriptionId string, resourceGroupName string, serviceName string) ServiceId { + return ServiceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + } +} + +// ParseServiceID parses 'input' into a ServiceId +func ParseServiceID(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceIDInsensitively parses 'input' case-insensitively into a ServiceId +// note: this method should only be used for API response data and not user input +func ParseServiceIDInsensitively(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + return nil +} + +// ValidateServiceID checks that 'input' can be parsed as a Service ID +func ValidateServiceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service ID +func (id ServiceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service ID +func (id ServiceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + } +} + +// String returns a human-readable description of this Service ID +func (id 
ServiceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + } + return fmt.Sprintf("Service (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/delete/id_service_test.go b/resource-manager/datamigration/2025-06-30/delete/id_service_test.go new file mode 100644 index 00000000000..88203a1e41b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/id_service_test.go @@ -0,0 +1,282 @@ +package delete + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &ServiceId{} + +func TestNewServiceID(t *testing.T) { + id := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } +} + +func TestFormatServiceID(t *testing.T) { + actual := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + 
+func TestParseServiceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if 
actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestParseServiceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestSegmentsForServiceId(t *testing.T) { + segments := ServiceId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/delete/id_servicetask.go b/resource-manager/datamigration/2025-06-30/delete/id_servicetask.go new file mode 100644 index 00000000000..e1cc85bd9dd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/id_servicetask.go @@ -0,0 +1,139 @@ +package delete + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&ServiceTaskId{}) +} + +var _ resourceids.ResourceId = &ServiceTaskId{} + +// ServiceTaskId is a struct representing the Resource ID for a Service Task +type ServiceTaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ServiceTaskName string +} + +// NewServiceTaskID returns a new ServiceTaskId struct +func NewServiceTaskID(subscriptionId string, resourceGroupName string, serviceName string, serviceTaskName string) ServiceTaskId { + return ServiceTaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ServiceTaskName: serviceTaskName, + } +} + +// ParseServiceTaskID parses 'input' into a ServiceTaskId +func ParseServiceTaskID(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceTaskIDInsensitively parses 'input' case-insensitively into a ServiceTaskId +// note: this method should only be used for API response data and not user input +func ParseServiceTaskIDInsensitively(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceTaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ServiceTaskName, ok = input.Parsed["serviceTaskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceTaskName", input) + } + + return nil +} + +// ValidateServiceTaskID checks that 'input' can be parsed as a Service Task ID +func ValidateServiceTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service Task ID +func (id ServiceTaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/serviceTasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ServiceTaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service Task ID +func (id ServiceTaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + 
resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticServiceTasks", "serviceTasks", "serviceTasks"), + resourceids.UserSpecifiedSegment("serviceTaskName", "serviceTaskName"), + } +} + +// String returns a human-readable description of this Service Task ID +func (id ServiceTaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Service Task Name: %q", id.ServiceTaskName), + } + return fmt.Sprintf("Service Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/delete/id_servicetask_test.go b/resource-manager/datamigration/2025-06-30/delete/id_servicetask_test.go new file mode 100644 index 00000000000..ff9dedcc6a6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/id_servicetask_test.go @@ -0,0 +1,327 @@ +package delete + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &ServiceTaskId{} + +func TestNewServiceTaskID(t *testing.T) { + id := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ServiceTaskName != "serviceTaskName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceTaskName'", id.ServiceTaskName, "serviceTaskName") + } +} + +func TestFormatServiceTaskID(t *testing.T) { + actual := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseServiceTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // 
Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", 
v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestParseServiceTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI 
(mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ServiceTaskName: "sErViCeTaSkNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", 
v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestSegmentsForServiceTaskId(t *testing.T) { + segments := ServiceTaskId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceTaskId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/delete/id_task.go b/resource-manager/datamigration/2025-06-30/delete/id_task.go new file mode 100644 index 00000000000..0a5e80021d7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/id_task.go @@ -0,0 +1,148 @@ +package delete + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&TaskId{}) +} + +var _ resourceids.ResourceId = &TaskId{} + +// TaskId is a struct representing the Resource ID for a Task +type TaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + TaskName string +} + +// NewTaskID returns a new TaskId struct +func NewTaskID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, taskName string) TaskId { + return TaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + TaskName: taskName, + } +} + +// ParseTaskID parses 'input' into a TaskId +func ParseTaskID(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseTaskIDInsensitively parses 'input' case-insensitively into a TaskId +// note: this method should only be used for API response data and not user input +func ParseTaskIDInsensitively(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *TaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if 
id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.TaskName, ok = input.Parsed["taskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "taskName", input) + } + + return nil +} + +// ValidateTaskID checks that 'input' can be parsed as a Task ID +func ValidateTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Task ID +func (id TaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/tasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.TaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Task ID +func (id TaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + 
resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticTasks", "tasks", "tasks"), + resourceids.UserSpecifiedSegment("taskName", "taskName"), + } +} + +// String returns a human-readable description of this Task ID +func (id TaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("Task Name: %q", id.TaskName), + } + return fmt.Sprintf("Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/delete/id_task_test.go b/resource-manager/datamigration/2025-06-30/delete/id_task_test.go new file mode 100644 index 00000000000..356ed78b311 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/id_task_test.go @@ -0,0 +1,372 @@ +package delete + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &TaskId{} + +func TestNewTaskID(t *testing.T) { + id := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.TaskName != "taskName" { + t.Fatalf("Expected %q but got %q for Segment 'TaskName'", id.TaskName, "taskName") + } +} + +func TestFormatTaskID(t *testing.T) { + actual := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.TaskName != v.Expected.TaskName { + t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName) + } + + } +} + +func TestParseTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + TaskName: "tAsKnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.TaskName != v.Expected.TaskName { + t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName) + } + + } +} + +func TestSegmentsForTaskId(t *testing.T) { + segments := TaskId{}.Segments() + if len(segments) == 0 { + t.Fatalf("TaskId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/delete/method_filesdelete.go b/resource-manager/datamigration/2025-06-30/delete/method_filesdelete.go new file mode 100644 index 00000000000..9290ad260ab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/method_filesdelete.go @@ -0,0 +1,47 @@ +package delete + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesDeleteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData +} + +// FilesDelete ... 
+func (c DELETEClient) FilesDelete(ctx context.Context, id FileId) (result FilesDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/delete/method_projectsdelete.go b/resource-manager/datamigration/2025-06-30/delete/method_projectsdelete.go new file mode 100644 index 00000000000..f78f722c581 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/method_projectsdelete.go @@ -0,0 +1,77 @@ +package delete + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ProjectsDeleteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData +} + +type ProjectsDeleteOperationOptions struct { + DeleteRunningTasks *bool +} + +func DefaultProjectsDeleteOperationOptions() ProjectsDeleteOperationOptions { + return ProjectsDeleteOperationOptions{} +} + +func (o ProjectsDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ProjectsDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ProjectsDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.DeleteRunningTasks != nil { + out.Append("deleteRunningTasks", fmt.Sprintf("%v", *o.DeleteRunningTasks)) + } + return &out +} + +// ProjectsDelete ... +func (c DELETEClient) ProjectsDelete(ctx context.Context, id ProjectId, options ProjectsDeleteOperationOptions) (result ProjectsDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/delete/method_servicesdelete.go b/resource-manager/datamigration/2025-06-30/delete/method_servicesdelete.go new file mode 100644 index 00000000000..5f98f0b59c7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/method_servicesdelete.go @@ -0,0 +1,100 @@ +package delete + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesDeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +type ServicesDeleteOperationOptions struct { + DeleteRunningTasks *bool +} + +func DefaultServicesDeleteOperationOptions() ServicesDeleteOperationOptions { + return ServicesDeleteOperationOptions{} +} + +func (o ServicesDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ServicesDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ServicesDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.DeleteRunningTasks != nil { + out.Append("deleteRunningTasks", fmt.Sprintf("%v", *o.DeleteRunningTasks)) + } + return &out +} + +// ServicesDelete ... 
+func (c DELETEClient) ServicesDelete(ctx context.Context, id ServiceId, options ServicesDeleteOperationOptions) (result ServicesDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesDeleteThenPoll performs ServicesDelete then polls until it's completed +func (c DELETEClient) ServicesDeleteThenPoll(ctx context.Context, id ServiceId, options ServicesDeleteOperationOptions) error { + result, err := c.ServicesDelete(ctx, id, options) + if err != nil { + return fmt.Errorf("performing ServicesDelete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesDelete: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/delete/method_servicetasksdelete.go b/resource-manager/datamigration/2025-06-30/delete/method_servicetasksdelete.go new file mode 100644 index 00000000000..9a6569fe221 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/method_servicetasksdelete.go @@ -0,0 +1,77 @@ +package delete + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServiceTasksDeleteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData +} + +type ServiceTasksDeleteOperationOptions struct { + DeleteRunningTasks *bool +} + +func DefaultServiceTasksDeleteOperationOptions() ServiceTasksDeleteOperationOptions { + return ServiceTasksDeleteOperationOptions{} +} + +func (o ServiceTasksDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ServiceTasksDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ServiceTasksDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.DeleteRunningTasks != nil { + out.Append("deleteRunningTasks", fmt.Sprintf("%v", *o.DeleteRunningTasks)) + } + return &out +} + +// ServiceTasksDelete ... +func (c DELETEClient) ServiceTasksDelete(ctx context.Context, id ServiceTaskId, options ServiceTasksDeleteOperationOptions) (result ServiceTasksDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/delete/method_tasksdelete.go b/resource-manager/datamigration/2025-06-30/delete/method_tasksdelete.go new file mode 100644 index 00000000000..fd49246a727 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/method_tasksdelete.go @@ -0,0 +1,77 @@ +package delete + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksDeleteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData +} + +type TasksDeleteOperationOptions struct { + DeleteRunningTasks *bool +} + +func DefaultTasksDeleteOperationOptions() TasksDeleteOperationOptions { + return TasksDeleteOperationOptions{} +} + +func (o TasksDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o TasksDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o TasksDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.DeleteRunningTasks != nil { + out.Append("deleteRunningTasks", fmt.Sprintf("%v", *o.DeleteRunningTasks)) + } + return &out +} + +// TasksDelete ... +func (c DELETEClient) TasksDelete(ctx context.Context, id TaskId, options TasksDeleteOperationOptions) (result TasksDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/delete/version.go b/resource-manager/datamigration/2025-06-30/delete/version.go new file mode 100644 index 00000000000..fbc2d8711e8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/delete/version.go @@ -0,0 +1,10 @@ +package delete + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/delete/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/fieresource/README.md b/resource-manager/datamigration/2025-06-30/fieresource/README.md new file mode 100644 index 00000000000..649a5935ca5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fieresource/README.md @@ -0,0 +1,41 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/fieresource` Documentation + +The `fieresource` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/fieresource" +``` + + +### Client Initialization + +```go +client := fieresource.NewFieResourceClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `FieResourceClient.FilesCreateOrUpdate` + +```go +ctx := context.TODO() +id := fieresource.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +payload := fieresource.ProjectFile{ + // ... 
+} + + +read, err := client.FilesCreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/datamigration/2025-06-30/fieresource/client.go b/resource-manager/datamigration/2025-06-30/fieresource/client.go new file mode 100644 index 00000000000..6840b7ba0cf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fieresource/client.go @@ -0,0 +1,26 @@ +package fieresource + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FieResourceClient struct { + Client *resourcemanager.Client +} + +func NewFieResourceClientWithBaseURI(sdkApi sdkEnv.Api) (*FieResourceClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "fieresource", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating FieResourceClient: %+v", err) + } + + return &FieResourceClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/fieresource/id_file.go b/resource-manager/datamigration/2025-06-30/fieresource/id_file.go new file mode 100644 index 00000000000..d06ecb57be5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fieresource/id_file.go @@ -0,0 +1,148 @@ +package fieresource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&FileId{}) +} + +var _ resourceids.ResourceId = &FileId{} + +// FileId is a struct representing the Resource ID for a File +type FileId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + FileName string +} + +// NewFileID returns a new FileId struct +func NewFileID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, fileName string) FileId { + return FileId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + FileName: fileName, + } +} + +// ParseFileID parses 'input' into a FileId +func ParseFileID(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseFileIDInsensitively parses 'input' case-insensitively into a FileId +// note: this method should only be used for API response data and not user input +func ParseFileIDInsensitively(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *FileId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if 
id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.FileName, ok = input.Parsed["fileName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "fileName", input) + } + + return nil +} + +// ValidateFileID checks that 'input' can be parsed as a File ID +func ValidateFileID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseFileID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted File ID +func (id FileId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/files/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.FileName) +} + +// Segments returns a slice of Resource ID Segments which comprise this File ID +func (id FileId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + 
resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticFiles", "files", "files"), + resourceids.UserSpecifiedSegment("fileName", "fileName"), + } +} + +// String returns a human-readable description of this File ID +func (id FileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("File Name: %q", id.FileName), + } + return fmt.Sprintf("File (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/fieresource/id_file_test.go b/resource-manager/datamigration/2025-06-30/fieresource/id_file_test.go new file mode 100644 index 00000000000..d73431e5cef --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fieresource/id_file_test.go @@ -0,0 +1,372 @@ +package fieresource + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &FileId{} + +func TestNewFileID(t *testing.T) { + id := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.FileName != "fileName" { + t.Fatalf("Expected %q but got %q for Segment 'FileName'", id.FileName, "fileName") + } +} + +func TestFormatFileID(t *testing.T) { + actual := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseFileID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestParseFileIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + FileName: "fIlEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestSegmentsForFileId(t *testing.T) { + segments := FileId{}.Segments() + if len(segments) == 0 { + t.Fatalf("FileId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/fieresource/method_filescreateorupdate.go b/resource-manager/datamigration/2025-06-30/fieresource/method_filescreateorupdate.go new file mode 100644 index 00000000000..35ee1e167d8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fieresource/method_filescreateorupdate.go @@ -0,0 +1,58 @@ +package fieresource + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesCreateOrUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectFile +} + +// FilesCreateOrUpdate ... 
+func (c FieResourceClient) FilesCreateOrUpdate(ctx context.Context, id FileId, input ProjectFile) (result FilesCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectFile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/fieresource/model_projectfile.go b/resource-manager/datamigration/2025-06-30/fieresource/model_projectfile.go new file mode 100644 index 00000000000..3f74c0a2b07 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fieresource/model_projectfile.go @@ -0,0 +1,17 @@ +package fieresource + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ProjectFile struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *ProjectFileProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/fieresource/model_projectfileproperties.go b/resource-manager/datamigration/2025-06-30/fieresource/model_projectfileproperties.go new file mode 100644 index 00000000000..2a5ef29fb46 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fieresource/model_projectfileproperties.go @@ -0,0 +1,30 @@ +package fieresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectFileProperties struct { + Extension *string `json:"extension,omitempty"` + FilePath *string `json:"filePath,omitempty"` + LastModified *string `json:"lastModified,omitempty"` + MediaType *string `json:"mediaType,omitempty"` + Size *int64 `json:"size,omitempty"` +} + +func (o *ProjectFileProperties) GetLastModifiedAsTime() (*time.Time, error) { + if o.LastModified == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastModified, "2006-01-02T15:04:05Z07:00") +} + +func (o *ProjectFileProperties) SetLastModifiedAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModified = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/fieresource/version.go b/resource-manager/datamigration/2025-06-30/fieresource/version.go new file mode 100644 index 00000000000..27978f1f6a7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fieresource/version.go @@ -0,0 +1,10 @@ +package fieresource + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/fieresource/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/README.md b/resource-manager/datamigration/2025-06-30/fileresource/README.md new file mode 100644 index 00000000000..51321fc2b25 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/README.md @@ -0,0 +1,122 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/fileresource` Documentation + +The `fileresource` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/fileresource" +``` + + +### Client Initialization + +```go +client := fileresource.NewFileResourceClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `FileResourceClient.FilesDelete` + +```go +ctx := context.TODO() +id := fileresource.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +read, err := client.FilesDelete(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `FileResourceClient.FilesGet` + +```go +ctx := context.TODO() +id := fileresource.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +read, err := client.FilesGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do 
something with the model/response object +} +``` + + +### Example Usage: `FileResourceClient.FilesList` + +```go +ctx := context.TODO() +id := fileresource.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +// alternatively `client.FilesList(ctx, id)` can be used to do batched pagination +items, err := client.FilesListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `FileResourceClient.FilesRead` + +```go +ctx := context.TODO() +id := fileresource.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +read, err := client.FilesRead(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `FileResourceClient.FilesReadWrite` + +```go +ctx := context.TODO() +id := fileresource.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +read, err := client.FilesReadWrite(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `FileResourceClient.FilesUpdate` + +```go +ctx := context.TODO() +id := fileresource.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +payload := fileresource.ProjectFile{ + // ... 
+} + + +read, err := client.FilesUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/datamigration/2025-06-30/fileresource/client.go b/resource-manager/datamigration/2025-06-30/fileresource/client.go new file mode 100644 index 00000000000..a76a462c144 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/client.go @@ -0,0 +1,26 @@ +package fileresource + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FileResourceClient struct { + Client *resourcemanager.Client +} + +func NewFileResourceClientWithBaseURI(sdkApi sdkEnv.Api) (*FileResourceClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "fileresource", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating FileResourceClient: %+v", err) + } + + return &FileResourceClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/id_file.go b/resource-manager/datamigration/2025-06-30/fileresource/id_file.go new file mode 100644 index 00000000000..0fae6c87717 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/id_file.go @@ -0,0 +1,148 @@ +package fileresource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&FileId{}) +} + +var _ resourceids.ResourceId = &FileId{} + +// FileId is a struct representing the Resource ID for a File +type FileId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + FileName string +} + +// NewFileID returns a new FileId struct +func NewFileID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, fileName string) FileId { + return FileId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + FileName: fileName, + } +} + +// ParseFileID parses 'input' into a FileId +func ParseFileID(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseFileIDInsensitively parses 'input' case-insensitively into a FileId +// note: this method should only be used for API response data and not user input +func ParseFileIDInsensitively(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *FileId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if 
id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.FileName, ok = input.Parsed["fileName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "fileName", input) + } + + return nil +} + +// ValidateFileID checks that 'input' can be parsed as a File ID +func ValidateFileID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseFileID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted File ID +func (id FileId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/files/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.FileName) +} + +// Segments returns a slice of Resource ID Segments which comprise this File ID +func (id FileId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + 
resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticFiles", "files", "files"), + resourceids.UserSpecifiedSegment("fileName", "fileName"), + } +} + +// String returns a human-readable description of this File ID +func (id FileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("File Name: %q", id.FileName), + } + return fmt.Sprintf("File (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/id_file_test.go b/resource-manager/datamigration/2025-06-30/fileresource/id_file_test.go new file mode 100644 index 00000000000..f13b7e99adc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/id_file_test.go @@ -0,0 +1,372 @@ +package fileresource + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &FileId{} + +func TestNewFileID(t *testing.T) { + id := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.FileName != "fileName" { + t.Fatalf("Expected %q but got %q for Segment 'FileName'", id.FileName, "fileName") + } +} + +func TestFormatFileID(t *testing.T) { + actual := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseFileID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestParseFileIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + FileName: "fIlEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestSegmentsForFileId(t *testing.T) { + segments := FileId{}.Segments() + if len(segments) == 0 { + t.Fatalf("FileId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/id_project.go b/resource-manager/datamigration/2025-06-30/fileresource/id_project.go new file mode 100644 index 00000000000..c9d99858f9d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/id_project.go @@ -0,0 +1,139 @@ +package fileresource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ProjectId{}) +} + +var _ resourceids.ResourceId = &ProjectId{} + +// ProjectId is a struct representing the Resource ID for a Project +type ProjectId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string +} + +// NewProjectID returns a new ProjectId struct +func NewProjectID(subscriptionId string, resourceGroupName string, serviceName string, projectName string) ProjectId { + return ProjectId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + } +} + +// ParseProjectID parses 'input' into a ProjectId +func ParseProjectID(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseProjectIDInsensitively parses 'input' case-insensitively into a ProjectId +// note: this method should only be used for API response data and not user input +func ParseProjectIDInsensitively(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ProjectId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + 
if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + return nil +} + +// ValidateProjectID checks that 'input' can be parsed as a Project ID +func ValidateProjectID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseProjectID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Project ID +func (id ProjectId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Project ID +func (id ProjectId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + } +} + +// String 
returns a human-readable description of this Project ID +func (id ProjectId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + } + return fmt.Sprintf("Project (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/id_project_test.go b/resource-manager/datamigration/2025-06-30/fileresource/id_project_test.go new file mode 100644 index 00000000000..21a120e14af --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/id_project_test.go @@ -0,0 +1,327 @@ +package fileresource + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &ProjectId{} + +func TestNewProjectID(t *testing.T) { + id := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } +} + +func TestFormatProjectID(t *testing.T) { + actual := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseProjectID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if 
actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestParseProjectIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: 
&ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != 
v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestSegmentsForProjectId(t *testing.T) { + segments := ProjectId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ProjectId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/method_filesdelete.go b/resource-manager/datamigration/2025-06-30/fileresource/method_filesdelete.go new file mode 100644 index 00000000000..4ebb8f6f3e6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/method_filesdelete.go @@ -0,0 +1,47 @@ +package fileresource + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesDeleteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData +} + +// FilesDelete ... 
+func (c FileResourceClient) FilesDelete(ctx context.Context, id FileId) (result FilesDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/method_filesget.go b/resource-manager/datamigration/2025-06-30/fileresource/method_filesget.go new file mode 100644 index 00000000000..1b2ec80afeb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/method_filesget.go @@ -0,0 +1,53 @@ +package fileresource + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectFile +} + +// FilesGet ... 
+func (c FileResourceClient) FilesGet(ctx context.Context, id FileId) (result FilesGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectFile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/method_fileslist.go b/resource-manager/datamigration/2025-06-30/fileresource/method_fileslist.go new file mode 100644 index 00000000000..2d6b3ef540f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/method_fileslist.go @@ -0,0 +1,105 @@ +package fileresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]ProjectFile +} + +type FilesListCompleteResult struct { + LatestHttpResponse *http.Response + Items []ProjectFile +} + +type FilesListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *FilesListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// FilesList ... 
+func (c FileResourceClient) FilesList(ctx context.Context, id ProjectId) (result FilesListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &FilesListCustomPager{}, + Path: fmt.Sprintf("%s/files", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]ProjectFile `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// FilesListComplete retrieves all the results into a single object +func (c FileResourceClient) FilesListComplete(ctx context.Context, id ProjectId) (FilesListCompleteResult, error) { + return c.FilesListCompleteMatchingPredicate(ctx, id, ProjectFileOperationPredicate{}) +} + +// FilesListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c FileResourceClient) FilesListCompleteMatchingPredicate(ctx context.Context, id ProjectId, predicate ProjectFileOperationPredicate) (result FilesListCompleteResult, err error) { + items := make([]ProjectFile, 0) + + resp, err := c.FilesList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = FilesListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/method_filesread.go b/resource-manager/datamigration/2025-06-30/fileresource/method_filesread.go new file mode 100644 index 
00000000000..2ed298377f8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/method_filesread.go @@ -0,0 +1,54 @@ +package fileresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesReadOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *FileStorageInfo +} + +// FilesRead ... +func (c FileResourceClient) FilesRead(ctx context.Context, id FileId) (result FilesReadOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/read", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model FileStorageInfo + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/method_filesreadwrite.go b/resource-manager/datamigration/2025-06-30/fileresource/method_filesreadwrite.go new file mode 100644 index 00000000000..d1ec6f1c739 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/method_filesreadwrite.go @@ -0,0 +1,54 @@ +package fileresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type FilesReadWriteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *FileStorageInfo +} + +// FilesReadWrite ... +func (c FileResourceClient) FilesReadWrite(ctx context.Context, id FileId) (result FilesReadWriteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/readwrite", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model FileStorageInfo + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/method_filesupdate.go b/resource-manager/datamigration/2025-06-30/fileresource/method_filesupdate.go new file mode 100644 index 00000000000..03d5341271a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/method_filesupdate.go @@ -0,0 +1,57 @@ +package fileresource + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectFile +} + +// FilesUpdate ... 
+func (c FileResourceClient) FilesUpdate(ctx context.Context, id FileId, input ProjectFile) (result FilesUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectFile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/model_filestorageinfo.go b/resource-manager/datamigration/2025-06-30/fileresource/model_filestorageinfo.go new file mode 100644 index 00000000000..87eee2b13c7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/model_filestorageinfo.go @@ -0,0 +1,9 @@ +package fileresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FileStorageInfo struct { + Headers *map[string]string `json:"headers,omitempty"` + Uri *string `json:"uri,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/model_projectfile.go b/resource-manager/datamigration/2025-06-30/fileresource/model_projectfile.go new file mode 100644 index 00000000000..47163ef1b3d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/model_projectfile.go @@ -0,0 +1,17 @@ +package fileresource + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ProjectFile struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *ProjectFileProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/model_projectfileproperties.go b/resource-manager/datamigration/2025-06-30/fileresource/model_projectfileproperties.go new file mode 100644 index 00000000000..fb8dfcc2c27 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/model_projectfileproperties.go @@ -0,0 +1,30 @@ +package fileresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectFileProperties struct { + Extension *string `json:"extension,omitempty"` + FilePath *string `json:"filePath,omitempty"` + LastModified *string `json:"lastModified,omitempty"` + MediaType *string `json:"mediaType,omitempty"` + Size *int64 `json:"size,omitempty"` +} + +func (o *ProjectFileProperties) GetLastModifiedAsTime() (*time.Time, error) { + if o.LastModified == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastModified, "2006-01-02T15:04:05Z07:00") +} + +func (o *ProjectFileProperties) SetLastModifiedAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModified = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/predicates.go b/resource-manager/datamigration/2025-06-30/fileresource/predicates.go new file mode 100644 index 00000000000..20b2ad94d58 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/predicates.go @@ -0,0 +1,32 @@ +package fileresource + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectFileOperationPredicate struct { + Etag *string + Id *string + Name *string + Type *string +} + +func (p ProjectFileOperationPredicate) Matches(input ProjectFile) bool { + + if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/datamigration/2025-06-30/fileresource/version.go b/resource-manager/datamigration/2025-06-30/fileresource/version.go new file mode 100644 index 00000000000..9a09751339d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/fileresource/version.go @@ -0,0 +1,10 @@ +package fileresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/fileresource/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/get/README.md b/resource-manager/datamigration/2025-06-30/get/README.md new file mode 100644 index 00000000000..6982a348941 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/README.md @@ -0,0 +1,254 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/get` Documentation + +The `get` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/get" +``` + + +### Client Initialization + +```go +client := get.NewGETClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `GETClient.FilesGet` + +```go +ctx := context.TODO() +id := get.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +read, err := client.FilesGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `GETClient.FilesList` + +```go +ctx := context.TODO() +id := get.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +// alternatively `client.FilesList(ctx, id)` can be used to do batched pagination +items, err := client.FilesListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `GETClient.ProjectsGet` + +```go +ctx := context.TODO() +id := get.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +read, err := client.ProjectsGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `GETClient.ProjectsList` + +```go +ctx := context.TODO() +id := get.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +// alternatively `client.ProjectsList(ctx, id)` can be used to do batched pagination +items, err := client.ProjectsListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: 
`GETClient.ResourceSkusListSkus` + +```go +ctx := context.TODO() +id := commonids.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.ResourceSkusListSkus(ctx, id)` can be used to do batched pagination +items, err := client.ResourceSkusListSkusComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `GETClient.ServiceTasksGet` + +```go +ctx := context.TODO() +id := get.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +read, err := client.ServiceTasksGet(ctx, id, get.DefaultServiceTasksGetOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `GETClient.ServiceTasksList` + +```go +ctx := context.TODO() +id := get.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +// alternatively `client.ServiceTasksList(ctx, id, get.DefaultServiceTasksListOperationOptions())` can be used to do batched pagination +items, err := client.ServiceTasksListComplete(ctx, id, get.DefaultServiceTasksListOperationOptions()) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `GETClient.ServicesGet` + +```go +ctx := context.TODO() +id := get.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +read, err := client.ServicesGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `GETClient.ServicesList` + +```go +ctx := context.TODO() +id := commonids.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.ServicesList(ctx, id)` can be used to do batched pagination +items, err := 
client.ServicesListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `GETClient.ServicesListByResourceGroup` + +```go +ctx := context.TODO() +id := get.NewSubscriptionResourceGroupID("12345678-1234-9876-4563-123456789012", "resourceGroupName") + +// alternatively `client.ServicesListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ServicesListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `GETClient.ServicesListSkus` + +```go +ctx := context.TODO() +id := get.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +// alternatively `client.ServicesListSkus(ctx, id)` can be used to do batched pagination +items, err := client.ServicesListSkusComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `GETClient.TasksGet` + +```go +ctx := context.TODO() +id := get.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +read, err := client.TasksGet(ctx, id, get.DefaultTasksGetOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `GETClient.TasksList` + +```go +ctx := context.TODO() +id := get.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +// alternatively `client.TasksList(ctx, id, get.DefaultTasksListOperationOptions())` can be used to do batched pagination +items, err := client.TasksListComplete(ctx, id, get.DefaultTasksListOperationOptions()) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: 
`GETClient.UsagesList` + +```go +ctx := context.TODO() +id := get.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +// alternatively `client.UsagesList(ctx, id)` can be used to do batched pagination +items, err := client.UsagesListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/resource-manager/datamigration/2025-06-30/get/client.go b/resource-manager/datamigration/2025-06-30/get/client.go new file mode 100644 index 00000000000..ebc4d89f523 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/client.go @@ -0,0 +1,26 @@ +package get + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GETClient struct { + Client *resourcemanager.Client +} + +func NewGETClientWithBaseURI(sdkApi sdkEnv.Api) (*GETClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "get", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating GETClient: %+v", err) + } + + return &GETClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/constants.go b/resource-manager/datamigration/2025-06-30/get/constants.go new file mode 100644 index 00000000000..4e075bc87cd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/constants.go @@ -0,0 +1,2481 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AuthenticationType string + +const ( + AuthenticationTypeActiveDirectoryIntegrated AuthenticationType = "ActiveDirectoryIntegrated" + AuthenticationTypeActiveDirectoryPassword AuthenticationType = "ActiveDirectoryPassword" + AuthenticationTypeNone AuthenticationType = "None" + AuthenticationTypeSqlAuthentication AuthenticationType = "SqlAuthentication" + AuthenticationTypeWindowsAuthentication AuthenticationType = "WindowsAuthentication" +) + +func PossibleValuesForAuthenticationType() []string { + return []string{ + string(AuthenticationTypeActiveDirectoryIntegrated), + string(AuthenticationTypeActiveDirectoryPassword), + string(AuthenticationTypeNone), + string(AuthenticationTypeSqlAuthentication), + string(AuthenticationTypeWindowsAuthentication), + } +} + +func (s *AuthenticationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthenticationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthenticationType(input string) (*AuthenticationType, error) { + vals := map[string]AuthenticationType{ + "activedirectoryintegrated": AuthenticationTypeActiveDirectoryIntegrated, + "activedirectorypassword": AuthenticationTypeActiveDirectoryPassword, + "none": AuthenticationTypeNone, + "sqlauthentication": AuthenticationTypeSqlAuthentication, + "windowsauthentication": AuthenticationTypeWindowsAuthentication, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthenticationType(input) + return &out, nil +} + +type BackupFileStatus string + +const ( + BackupFileStatusArrived BackupFileStatus = "Arrived" + BackupFileStatusCancelled BackupFileStatus = "Cancelled" + BackupFileStatusQueued BackupFileStatus = "Queued" + BackupFileStatusRestored 
BackupFileStatus = "Restored" + BackupFileStatusRestoring BackupFileStatus = "Restoring" + BackupFileStatusUploaded BackupFileStatus = "Uploaded" + BackupFileStatusUploading BackupFileStatus = "Uploading" +) + +func PossibleValuesForBackupFileStatus() []string { + return []string{ + string(BackupFileStatusArrived), + string(BackupFileStatusCancelled), + string(BackupFileStatusQueued), + string(BackupFileStatusRestored), + string(BackupFileStatusRestoring), + string(BackupFileStatusUploaded), + string(BackupFileStatusUploading), + } +} + +func (s *BackupFileStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupFileStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupFileStatus(input string) (*BackupFileStatus, error) { + vals := map[string]BackupFileStatus{ + "arrived": BackupFileStatusArrived, + "cancelled": BackupFileStatusCancelled, + "queued": BackupFileStatusQueued, + "restored": BackupFileStatusRestored, + "restoring": BackupFileStatusRestoring, + "uploaded": BackupFileStatusUploaded, + "uploading": BackupFileStatusUploading, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupFileStatus(input) + return &out, nil +} + +type BackupMode string + +const ( + BackupModeCreateBackup BackupMode = "CreateBackup" + BackupModeExistingBackup BackupMode = "ExistingBackup" +) + +func PossibleValuesForBackupMode() []string { + return []string{ + string(BackupModeCreateBackup), + string(BackupModeExistingBackup), + } +} + +func (s *BackupMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupMode(decoded) + 
if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupMode(input string) (*BackupMode, error) { + vals := map[string]BackupMode{ + "createbackup": BackupModeCreateBackup, + "existingbackup": BackupModeExistingBackup, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupMode(input) + return &out, nil +} + +type BackupType string + +const ( + BackupTypeDatabase BackupType = "Database" + BackupTypeDifferentialDatabase BackupType = "DifferentialDatabase" + BackupTypeDifferentialFile BackupType = "DifferentialFile" + BackupTypeDifferentialPartial BackupType = "DifferentialPartial" + BackupTypeFile BackupType = "File" + BackupTypePartial BackupType = "Partial" + BackupTypeTransactionLog BackupType = "TransactionLog" +) + +func PossibleValuesForBackupType() []string { + return []string{ + string(BackupTypeDatabase), + string(BackupTypeDifferentialDatabase), + string(BackupTypeDifferentialFile), + string(BackupTypeDifferentialPartial), + string(BackupTypeFile), + string(BackupTypePartial), + string(BackupTypeTransactionLog), + } +} + +func (s *BackupType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupType(input string) (*BackupType, error) { + vals := map[string]BackupType{ + "database": BackupTypeDatabase, + "differentialdatabase": BackupTypeDifferentialDatabase, + "differentialfile": BackupTypeDifferentialFile, + "differentialpartial": BackupTypeDifferentialPartial, + "file": BackupTypeFile, + "partial": BackupTypePartial, + "transactionlog": BackupTypeTransactionLog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return 
&v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupType(input) + return &out, nil +} + +type CommandState string + +const ( + CommandStateAccepted CommandState = "Accepted" + CommandStateFailed CommandState = "Failed" + CommandStateRunning CommandState = "Running" + CommandStateSucceeded CommandState = "Succeeded" + CommandStateUnknown CommandState = "Unknown" +) + +func PossibleValuesForCommandState() []string { + return []string{ + string(CommandStateAccepted), + string(CommandStateFailed), + string(CommandStateRunning), + string(CommandStateSucceeded), + string(CommandStateUnknown), + } +} + +func (s *CommandState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCommandState(input string) (*CommandState, error) { + vals := map[string]CommandState{ + "accepted": CommandStateAccepted, + "failed": CommandStateFailed, + "running": CommandStateRunning, + "succeeded": CommandStateSucceeded, + "unknown": CommandStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CommandState(input) + return &out, nil +} + +type CommandType string + +const ( + CommandTypeCancel CommandType = "cancel" + CommandTypeFinish CommandType = "finish" + CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete CommandType = "Migrate.SqlServer.AzureDbSqlMi.Complete" + CommandTypeMigratePointSyncPointCompletePointDatabase CommandType = "Migrate.Sync.Complete.Database" + CommandTypeRestart CommandType = "restart" +) + +func PossibleValuesForCommandType() []string { + return []string{ + string(CommandTypeCancel), + string(CommandTypeFinish), + 
string(CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete), + string(CommandTypeMigratePointSyncPointCompletePointDatabase), + string(CommandTypeRestart), + } +} + +func (s *CommandType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCommandType(input string) (*CommandType, error) { + vals := map[string]CommandType{ + "cancel": CommandTypeCancel, + "finish": CommandTypeFinish, + "migrate.sqlserver.azuredbsqlmi.complete": CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete, + "migrate.sync.complete.database": CommandTypeMigratePointSyncPointCompletePointDatabase, + "restart": CommandTypeRestart, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CommandType(input) + return &out, nil +} + +type DatabaseCompatLevel string + +const ( + DatabaseCompatLevelCompatLevelEightZero DatabaseCompatLevel = "CompatLevel80" + DatabaseCompatLevelCompatLevelNineZero DatabaseCompatLevel = "CompatLevel90" + DatabaseCompatLevelCompatLevelOneFourZero DatabaseCompatLevel = "CompatLevel140" + DatabaseCompatLevelCompatLevelOneHundred DatabaseCompatLevel = "CompatLevel100" + DatabaseCompatLevelCompatLevelOneOneZero DatabaseCompatLevel = "CompatLevel110" + DatabaseCompatLevelCompatLevelOneThreeZero DatabaseCompatLevel = "CompatLevel130" + DatabaseCompatLevelCompatLevelOneTwoZero DatabaseCompatLevel = "CompatLevel120" +) + +func PossibleValuesForDatabaseCompatLevel() []string { + return []string{ + string(DatabaseCompatLevelCompatLevelEightZero), + string(DatabaseCompatLevelCompatLevelNineZero), + string(DatabaseCompatLevelCompatLevelOneFourZero), + string(DatabaseCompatLevelCompatLevelOneHundred), + 
string(DatabaseCompatLevelCompatLevelOneOneZero), + string(DatabaseCompatLevelCompatLevelOneThreeZero), + string(DatabaseCompatLevelCompatLevelOneTwoZero), + } +} + +func (s *DatabaseCompatLevel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseCompatLevel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseCompatLevel(input string) (*DatabaseCompatLevel, error) { + vals := map[string]DatabaseCompatLevel{ + "compatlevel80": DatabaseCompatLevelCompatLevelEightZero, + "compatlevel90": DatabaseCompatLevelCompatLevelNineZero, + "compatlevel140": DatabaseCompatLevelCompatLevelOneFourZero, + "compatlevel100": DatabaseCompatLevelCompatLevelOneHundred, + "compatlevel110": DatabaseCompatLevelCompatLevelOneOneZero, + "compatlevel130": DatabaseCompatLevelCompatLevelOneThreeZero, + "compatlevel120": DatabaseCompatLevelCompatLevelOneTwoZero, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseCompatLevel(input) + return &out, nil +} + +type DatabaseFileType string + +const ( + DatabaseFileTypeFilestream DatabaseFileType = "Filestream" + DatabaseFileTypeFulltext DatabaseFileType = "Fulltext" + DatabaseFileTypeLog DatabaseFileType = "Log" + DatabaseFileTypeNotSupported DatabaseFileType = "NotSupported" + DatabaseFileTypeRows DatabaseFileType = "Rows" +) + +func PossibleValuesForDatabaseFileType() []string { + return []string{ + string(DatabaseFileTypeFilestream), + string(DatabaseFileTypeFulltext), + string(DatabaseFileTypeLog), + string(DatabaseFileTypeNotSupported), + string(DatabaseFileTypeRows), + } +} + +func (s *DatabaseFileType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + 
return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseFileType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseFileType(input string) (*DatabaseFileType, error) { + vals := map[string]DatabaseFileType{ + "filestream": DatabaseFileTypeFilestream, + "fulltext": DatabaseFileTypeFulltext, + "log": DatabaseFileTypeLog, + "notsupported": DatabaseFileTypeNotSupported, + "rows": DatabaseFileTypeRows, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseFileType(input) + return &out, nil +} + +type DatabaseMigrationStage string + +const ( + DatabaseMigrationStageBackup DatabaseMigrationStage = "Backup" + DatabaseMigrationStageCompleted DatabaseMigrationStage = "Completed" + DatabaseMigrationStageFileCopy DatabaseMigrationStage = "FileCopy" + DatabaseMigrationStageInitialize DatabaseMigrationStage = "Initialize" + DatabaseMigrationStageNone DatabaseMigrationStage = "None" + DatabaseMigrationStageRestore DatabaseMigrationStage = "Restore" +) + +func PossibleValuesForDatabaseMigrationStage() []string { + return []string{ + string(DatabaseMigrationStageBackup), + string(DatabaseMigrationStageCompleted), + string(DatabaseMigrationStageFileCopy), + string(DatabaseMigrationStageInitialize), + string(DatabaseMigrationStageNone), + string(DatabaseMigrationStageRestore), + } +} + +func (s *DatabaseMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationStage(input string) (*DatabaseMigrationStage, error) { + vals := map[string]DatabaseMigrationStage{ + "backup": 
DatabaseMigrationStageBackup, + "completed": DatabaseMigrationStageCompleted, + "filecopy": DatabaseMigrationStageFileCopy, + "initialize": DatabaseMigrationStageInitialize, + "none": DatabaseMigrationStageNone, + "restore": DatabaseMigrationStageRestore, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseMigrationStage(input) + return &out, nil +} + +type DatabaseMigrationState string + +const ( + DatabaseMigrationStateCANCELLED DatabaseMigrationState = "CANCELLED" + DatabaseMigrationStateCOMPLETED DatabaseMigrationState = "COMPLETED" + DatabaseMigrationStateCUTOVERSTART DatabaseMigrationState = "CUTOVER_START" + DatabaseMigrationStateFAILED DatabaseMigrationState = "FAILED" + DatabaseMigrationStateFULLBACKUPUPLOADSTART DatabaseMigrationState = "FULL_BACKUP_UPLOAD_START" + DatabaseMigrationStateINITIAL DatabaseMigrationState = "INITIAL" + DatabaseMigrationStateLOGSHIPPINGSTART DatabaseMigrationState = "LOG_SHIPPING_START" + DatabaseMigrationStatePOSTCUTOVERCOMPLETE DatabaseMigrationState = "POST_CUTOVER_COMPLETE" + DatabaseMigrationStateUNDEFINED DatabaseMigrationState = "UNDEFINED" + DatabaseMigrationStateUPLOADLOGFILESSTART DatabaseMigrationState = "UPLOAD_LOG_FILES_START" +) + +func PossibleValuesForDatabaseMigrationState() []string { + return []string{ + string(DatabaseMigrationStateCANCELLED), + string(DatabaseMigrationStateCOMPLETED), + string(DatabaseMigrationStateCUTOVERSTART), + string(DatabaseMigrationStateFAILED), + string(DatabaseMigrationStateFULLBACKUPUPLOADSTART), + string(DatabaseMigrationStateINITIAL), + string(DatabaseMigrationStateLOGSHIPPINGSTART), + string(DatabaseMigrationStatePOSTCUTOVERCOMPLETE), + string(DatabaseMigrationStateUNDEFINED), + string(DatabaseMigrationStateUPLOADLOGFILESSTART), + } +} + +func (s *DatabaseMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); 
err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationState(input string) (*DatabaseMigrationState, error) { + vals := map[string]DatabaseMigrationState{ + "cancelled": DatabaseMigrationStateCANCELLED, + "completed": DatabaseMigrationStateCOMPLETED, + "cutover_start": DatabaseMigrationStateCUTOVERSTART, + "failed": DatabaseMigrationStateFAILED, + "full_backup_upload_start": DatabaseMigrationStateFULLBACKUPUPLOADSTART, + "initial": DatabaseMigrationStateINITIAL, + "log_shipping_start": DatabaseMigrationStateLOGSHIPPINGSTART, + "post_cutover_complete": DatabaseMigrationStatePOSTCUTOVERCOMPLETE, + "undefined": DatabaseMigrationStateUNDEFINED, + "upload_log_files_start": DatabaseMigrationStateUPLOADLOGFILESSTART, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseMigrationState(input) + return &out, nil +} + +type DatabaseState string + +const ( + DatabaseStateCopying DatabaseState = "Copying" + DatabaseStateEmergency DatabaseState = "Emergency" + DatabaseStateOffline DatabaseState = "Offline" + DatabaseStateOfflineSecondary DatabaseState = "OfflineSecondary" + DatabaseStateOnline DatabaseState = "Online" + DatabaseStateRecovering DatabaseState = "Recovering" + DatabaseStateRecoveryPending DatabaseState = "RecoveryPending" + DatabaseStateRestoring DatabaseState = "Restoring" + DatabaseStateSuspect DatabaseState = "Suspect" +) + +func PossibleValuesForDatabaseState() []string { + return []string{ + string(DatabaseStateCopying), + string(DatabaseStateEmergency), + string(DatabaseStateOffline), + string(DatabaseStateOfflineSecondary), + string(DatabaseStateOnline), + string(DatabaseStateRecovering), + string(DatabaseStateRecoveryPending), + string(DatabaseStateRestoring), + 
string(DatabaseStateSuspect), + } +} + +func (s *DatabaseState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseState(input string) (*DatabaseState, error) { + vals := map[string]DatabaseState{ + "copying": DatabaseStateCopying, + "emergency": DatabaseStateEmergency, + "offline": DatabaseStateOffline, + "offlinesecondary": DatabaseStateOfflineSecondary, + "online": DatabaseStateOnline, + "recovering": DatabaseStateRecovering, + "recoverypending": DatabaseStateRecoveryPending, + "restoring": DatabaseStateRestoring, + "suspect": DatabaseStateSuspect, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseState(input) + return &out, nil +} + +type LoginMigrationStage string + +const ( + LoginMigrationStageAssignRoleMembership LoginMigrationStage = "AssignRoleMembership" + LoginMigrationStageAssignRoleOwnership LoginMigrationStage = "AssignRoleOwnership" + LoginMigrationStageCompleted LoginMigrationStage = "Completed" + LoginMigrationStageEstablishObjectPermissions LoginMigrationStage = "EstablishObjectPermissions" + LoginMigrationStageEstablishServerPermissions LoginMigrationStage = "EstablishServerPermissions" + LoginMigrationStageEstablishUserMapping LoginMigrationStage = "EstablishUserMapping" + LoginMigrationStageInitialize LoginMigrationStage = "Initialize" + LoginMigrationStageLoginMigration LoginMigrationStage = "LoginMigration" + LoginMigrationStageNone LoginMigrationStage = "None" +) + +func PossibleValuesForLoginMigrationStage() []string { + return []string{ + string(LoginMigrationStageAssignRoleMembership), + string(LoginMigrationStageAssignRoleOwnership), + 
string(LoginMigrationStageCompleted), + string(LoginMigrationStageEstablishObjectPermissions), + string(LoginMigrationStageEstablishServerPermissions), + string(LoginMigrationStageEstablishUserMapping), + string(LoginMigrationStageInitialize), + string(LoginMigrationStageLoginMigration), + string(LoginMigrationStageNone), + } +} + +func (s *LoginMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLoginMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLoginMigrationStage(input string) (*LoginMigrationStage, error) { + vals := map[string]LoginMigrationStage{ + "assignrolemembership": LoginMigrationStageAssignRoleMembership, + "assignroleownership": LoginMigrationStageAssignRoleOwnership, + "completed": LoginMigrationStageCompleted, + "establishobjectpermissions": LoginMigrationStageEstablishObjectPermissions, + "establishserverpermissions": LoginMigrationStageEstablishServerPermissions, + "establishusermapping": LoginMigrationStageEstablishUserMapping, + "initialize": LoginMigrationStageInitialize, + "loginmigration": LoginMigrationStageLoginMigration, + "none": LoginMigrationStageNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LoginMigrationStage(input) + return &out, nil +} + +type LoginType string + +const ( + LoginTypeAsymmetricKey LoginType = "AsymmetricKey" + LoginTypeCertificate LoginType = "Certificate" + LoginTypeExternalGroup LoginType = "ExternalGroup" + LoginTypeExternalUser LoginType = "ExternalUser" + LoginTypeSqlLogin LoginType = "SqlLogin" + LoginTypeWindowsGroup LoginType = "WindowsGroup" + LoginTypeWindowsUser LoginType = "WindowsUser" +) + +func PossibleValuesForLoginType() []string { + return []string{ + 
string(LoginTypeAsymmetricKey), + string(LoginTypeCertificate), + string(LoginTypeExternalGroup), + string(LoginTypeExternalUser), + string(LoginTypeSqlLogin), + string(LoginTypeWindowsGroup), + string(LoginTypeWindowsUser), + } +} + +func (s *LoginType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLoginType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLoginType(input string) (*LoginType, error) { + vals := map[string]LoginType{ + "asymmetrickey": LoginTypeAsymmetricKey, + "certificate": LoginTypeCertificate, + "externalgroup": LoginTypeExternalGroup, + "externaluser": LoginTypeExternalUser, + "sqllogin": LoginTypeSqlLogin, + "windowsgroup": LoginTypeWindowsGroup, + "windowsuser": LoginTypeWindowsUser, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LoginType(input) + return &out, nil +} + +type MigrationState string + +const ( + MigrationStateCompleted MigrationState = "Completed" + MigrationStateFailed MigrationState = "Failed" + MigrationStateInProgress MigrationState = "InProgress" + MigrationStateNone MigrationState = "None" + MigrationStateSkipped MigrationState = "Skipped" + MigrationStateStopped MigrationState = "Stopped" + MigrationStateWarning MigrationState = "Warning" +) + +func PossibleValuesForMigrationState() []string { + return []string{ + string(MigrationStateCompleted), + string(MigrationStateFailed), + string(MigrationStateInProgress), + string(MigrationStateNone), + string(MigrationStateSkipped), + string(MigrationStateStopped), + string(MigrationStateWarning), + } +} + +func (s *MigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return 
fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMigrationState(input string) (*MigrationState, error) { + vals := map[string]MigrationState{ + "completed": MigrationStateCompleted, + "failed": MigrationStateFailed, + "inprogress": MigrationStateInProgress, + "none": MigrationStateNone, + "skipped": MigrationStateSkipped, + "stopped": MigrationStateStopped, + "warning": MigrationStateWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MigrationState(input) + return &out, nil +} + +type MigrationStatus string + +const ( + MigrationStatusCompleted MigrationStatus = "Completed" + MigrationStatusCompletedWithWarnings MigrationStatus = "CompletedWithWarnings" + MigrationStatusConfigured MigrationStatus = "Configured" + MigrationStatusConnecting MigrationStatus = "Connecting" + MigrationStatusDefault MigrationStatus = "Default" + MigrationStatusError MigrationStatus = "Error" + MigrationStatusRunning MigrationStatus = "Running" + MigrationStatusSelectLogins MigrationStatus = "SelectLogins" + MigrationStatusSourceAndTargetSelected MigrationStatus = "SourceAndTargetSelected" + MigrationStatusStopped MigrationStatus = "Stopped" +) + +func PossibleValuesForMigrationStatus() []string { + return []string{ + string(MigrationStatusCompleted), + string(MigrationStatusCompletedWithWarnings), + string(MigrationStatusConfigured), + string(MigrationStatusConnecting), + string(MigrationStatusDefault), + string(MigrationStatusError), + string(MigrationStatusRunning), + string(MigrationStatusSelectLogins), + string(MigrationStatusSourceAndTargetSelected), + string(MigrationStatusStopped), + } +} + +func (s *MigrationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != 
nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMigrationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMigrationStatus(input string) (*MigrationStatus, error) { + vals := map[string]MigrationStatus{ + "completed": MigrationStatusCompleted, + "completedwithwarnings": MigrationStatusCompletedWithWarnings, + "configured": MigrationStatusConfigured, + "connecting": MigrationStatusConnecting, + "default": MigrationStatusDefault, + "error": MigrationStatusError, + "running": MigrationStatusRunning, + "selectlogins": MigrationStatusSelectLogins, + "sourceandtargetselected": MigrationStatusSourceAndTargetSelected, + "stopped": MigrationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MigrationStatus(input) + return &out, nil +} + +type MongoDbClusterType string + +const ( + MongoDbClusterTypeBlobContainer MongoDbClusterType = "BlobContainer" + MongoDbClusterTypeCosmosDb MongoDbClusterType = "CosmosDb" + MongoDbClusterTypeMongoDb MongoDbClusterType = "MongoDb" +) + +func PossibleValuesForMongoDbClusterType() []string { + return []string{ + string(MongoDbClusterTypeBlobContainer), + string(MongoDbClusterTypeCosmosDb), + string(MongoDbClusterTypeMongoDb), + } +} + +func (s *MongoDbClusterType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbClusterType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbClusterType(input string) (*MongoDbClusterType, error) { + vals := map[string]MongoDbClusterType{ + "blobcontainer": MongoDbClusterTypeBlobContainer, + "cosmosdb": MongoDbClusterTypeCosmosDb, + "mongodb": MongoDbClusterTypeMongoDb, 
+ } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbClusterType(input) + return &out, nil +} + +type MongoDbErrorType string + +const ( + MongoDbErrorTypeError MongoDbErrorType = "Error" + MongoDbErrorTypeValidationError MongoDbErrorType = "ValidationError" + MongoDbErrorTypeWarning MongoDbErrorType = "Warning" +) + +func PossibleValuesForMongoDbErrorType() []string { + return []string{ + string(MongoDbErrorTypeError), + string(MongoDbErrorTypeValidationError), + string(MongoDbErrorTypeWarning), + } +} + +func (s *MongoDbErrorType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbErrorType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbErrorType(input string) (*MongoDbErrorType, error) { + vals := map[string]MongoDbErrorType{ + "error": MongoDbErrorTypeError, + "validationerror": MongoDbErrorTypeValidationError, + "warning": MongoDbErrorTypeWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbErrorType(input) + return &out, nil +} + +type MongoDbMigrationState string + +const ( + MongoDbMigrationStateCanceled MongoDbMigrationState = "Canceled" + MongoDbMigrationStateComplete MongoDbMigrationState = "Complete" + MongoDbMigrationStateCopying MongoDbMigrationState = "Copying" + MongoDbMigrationStateFailed MongoDbMigrationState = "Failed" + MongoDbMigrationStateFinalizing MongoDbMigrationState = "Finalizing" + MongoDbMigrationStateInitialReplay MongoDbMigrationState = "InitialReplay" + MongoDbMigrationStateInitializing MongoDbMigrationState = "Initializing" + MongoDbMigrationStateNotStarted MongoDbMigrationState = "NotStarted" + 
MongoDbMigrationStateReplaying MongoDbMigrationState = "Replaying" + MongoDbMigrationStateRestarting MongoDbMigrationState = "Restarting" + MongoDbMigrationStateValidatingInput MongoDbMigrationState = "ValidatingInput" +) + +func PossibleValuesForMongoDbMigrationState() []string { + return []string{ + string(MongoDbMigrationStateCanceled), + string(MongoDbMigrationStateComplete), + string(MongoDbMigrationStateCopying), + string(MongoDbMigrationStateFailed), + string(MongoDbMigrationStateFinalizing), + string(MongoDbMigrationStateInitialReplay), + string(MongoDbMigrationStateInitializing), + string(MongoDbMigrationStateNotStarted), + string(MongoDbMigrationStateReplaying), + string(MongoDbMigrationStateRestarting), + string(MongoDbMigrationStateValidatingInput), + } +} + +func (s *MongoDbMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbMigrationState(input string) (*MongoDbMigrationState, error) { + vals := map[string]MongoDbMigrationState{ + "canceled": MongoDbMigrationStateCanceled, + "complete": MongoDbMigrationStateComplete, + "copying": MongoDbMigrationStateCopying, + "failed": MongoDbMigrationStateFailed, + "finalizing": MongoDbMigrationStateFinalizing, + "initialreplay": MongoDbMigrationStateInitialReplay, + "initializing": MongoDbMigrationStateInitializing, + "notstarted": MongoDbMigrationStateNotStarted, + "replaying": MongoDbMigrationStateReplaying, + "restarting": MongoDbMigrationStateRestarting, + "validatinginput": MongoDbMigrationStateValidatingInput, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbMigrationState(input) + return &out, nil +} + 
+type MongoDbReplication string + +const ( + MongoDbReplicationContinuous MongoDbReplication = "Continuous" + MongoDbReplicationDisabled MongoDbReplication = "Disabled" + MongoDbReplicationOneTime MongoDbReplication = "OneTime" +) + +func PossibleValuesForMongoDbReplication() []string { + return []string{ + string(MongoDbReplicationContinuous), + string(MongoDbReplicationDisabled), + string(MongoDbReplicationOneTime), + } +} + +func (s *MongoDbReplication) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbReplication(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbReplication(input string) (*MongoDbReplication, error) { + vals := map[string]MongoDbReplication{ + "continuous": MongoDbReplicationContinuous, + "disabled": MongoDbReplicationDisabled, + "onetime": MongoDbReplicationOneTime, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbReplication(input) + return &out, nil +} + +type MongoDbShardKeyOrder string + +const ( + MongoDbShardKeyOrderForward MongoDbShardKeyOrder = "Forward" + MongoDbShardKeyOrderHashed MongoDbShardKeyOrder = "Hashed" + MongoDbShardKeyOrderReverse MongoDbShardKeyOrder = "Reverse" +) + +func PossibleValuesForMongoDbShardKeyOrder() []string { + return []string{ + string(MongoDbShardKeyOrderForward), + string(MongoDbShardKeyOrderHashed), + string(MongoDbShardKeyOrderReverse), + } +} + +func (s *MongoDbShardKeyOrder) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbShardKeyOrder(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + 
return nil +} + +func parseMongoDbShardKeyOrder(input string) (*MongoDbShardKeyOrder, error) { + vals := map[string]MongoDbShardKeyOrder{ + "forward": MongoDbShardKeyOrderForward, + "hashed": MongoDbShardKeyOrderHashed, + "reverse": MongoDbShardKeyOrderReverse, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbShardKeyOrder(input) + return &out, nil +} + +type MySqlTargetPlatformType string + +const ( + MySqlTargetPlatformTypeAzureDbForMySQL MySqlTargetPlatformType = "AzureDbForMySQL" + MySqlTargetPlatformTypeSqlServer MySqlTargetPlatformType = "SqlServer" +) + +func PossibleValuesForMySqlTargetPlatformType() []string { + return []string{ + string(MySqlTargetPlatformTypeAzureDbForMySQL), + string(MySqlTargetPlatformTypeSqlServer), + } +} + +func (s *MySqlTargetPlatformType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMySqlTargetPlatformType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMySqlTargetPlatformType(input string) (*MySqlTargetPlatformType, error) { + vals := map[string]MySqlTargetPlatformType{ + "azuredbformysql": MySqlTargetPlatformTypeAzureDbForMySQL, + "sqlserver": MySqlTargetPlatformTypeSqlServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MySqlTargetPlatformType(input) + return &out, nil +} + +type ObjectType string + +const ( + ObjectTypeFunction ObjectType = "Function" + ObjectTypeStoredProcedures ObjectType = "StoredProcedures" + ObjectTypeTable ObjectType = "Table" + ObjectTypeUser ObjectType = "User" + ObjectTypeView ObjectType = "View" +) + +func PossibleValuesForObjectType() []string { + return []string{ + 
string(ObjectTypeFunction), + string(ObjectTypeStoredProcedures), + string(ObjectTypeTable), + string(ObjectTypeUser), + string(ObjectTypeView), + } +} + +func (s *ObjectType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseObjectType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseObjectType(input string) (*ObjectType, error) { + vals := map[string]ObjectType{ + "function": ObjectTypeFunction, + "storedprocedures": ObjectTypeStoredProcedures, + "table": ObjectTypeTable, + "user": ObjectTypeUser, + "view": ObjectTypeView, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ObjectType(input) + return &out, nil +} + +type ProjectProvisioningState string + +const ( + ProjectProvisioningStateDeleting ProjectProvisioningState = "Deleting" + ProjectProvisioningStateSucceeded ProjectProvisioningState = "Succeeded" +) + +func PossibleValuesForProjectProvisioningState() []string { + return []string{ + string(ProjectProvisioningStateDeleting), + string(ProjectProvisioningStateSucceeded), + } +} + +func (s *ProjectProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProjectProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProjectProvisioningState(input string) (*ProjectProvisioningState, error) { + vals := map[string]ProjectProvisioningState{ + "deleting": ProjectProvisioningStateDeleting, + "succeeded": ProjectProvisioningStateSucceeded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise 
presume it's an undefined value and best-effort it + out := ProjectProvisioningState(input) + return &out, nil +} + +type ProjectSourcePlatform string + +const ( + ProjectSourcePlatformMongoDb ProjectSourcePlatform = "MongoDb" + ProjectSourcePlatformMySQL ProjectSourcePlatform = "MySQL" + ProjectSourcePlatformPostgreSql ProjectSourcePlatform = "PostgreSql" + ProjectSourcePlatformSQL ProjectSourcePlatform = "SQL" + ProjectSourcePlatformUnknown ProjectSourcePlatform = "Unknown" +) + +func PossibleValuesForProjectSourcePlatform() []string { + return []string{ + string(ProjectSourcePlatformMongoDb), + string(ProjectSourcePlatformMySQL), + string(ProjectSourcePlatformPostgreSql), + string(ProjectSourcePlatformSQL), + string(ProjectSourcePlatformUnknown), + } +} + +func (s *ProjectSourcePlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProjectSourcePlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProjectSourcePlatform(input string) (*ProjectSourcePlatform, error) { + vals := map[string]ProjectSourcePlatform{ + "mongodb": ProjectSourcePlatformMongoDb, + "mysql": ProjectSourcePlatformMySQL, + "postgresql": ProjectSourcePlatformPostgreSql, + "sql": ProjectSourcePlatformSQL, + "unknown": ProjectSourcePlatformUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProjectSourcePlatform(input) + return &out, nil +} + +type ProjectTargetPlatform string + +const ( + ProjectTargetPlatformAzureDbForMySql ProjectTargetPlatform = "AzureDbForMySql" + ProjectTargetPlatformAzureDbForPostgreSql ProjectTargetPlatform = "AzureDbForPostgreSql" + ProjectTargetPlatformMongoDb ProjectTargetPlatform = "MongoDb" + ProjectTargetPlatformSQLDB ProjectTargetPlatform = 
"SQLDB" + ProjectTargetPlatformSQLMI ProjectTargetPlatform = "SQLMI" + ProjectTargetPlatformUnknown ProjectTargetPlatform = "Unknown" +) + +func PossibleValuesForProjectTargetPlatform() []string { + return []string{ + string(ProjectTargetPlatformAzureDbForMySql), + string(ProjectTargetPlatformAzureDbForPostgreSql), + string(ProjectTargetPlatformMongoDb), + string(ProjectTargetPlatformSQLDB), + string(ProjectTargetPlatformSQLMI), + string(ProjectTargetPlatformUnknown), + } +} + +func (s *ProjectTargetPlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProjectTargetPlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProjectTargetPlatform(input string) (*ProjectTargetPlatform, error) { + vals := map[string]ProjectTargetPlatform{ + "azuredbformysql": ProjectTargetPlatformAzureDbForMySql, + "azuredbforpostgresql": ProjectTargetPlatformAzureDbForPostgreSql, + "mongodb": ProjectTargetPlatformMongoDb, + "sqldb": ProjectTargetPlatformSQLDB, + "sqlmi": ProjectTargetPlatformSQLMI, + "unknown": ProjectTargetPlatformUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProjectTargetPlatform(input) + return &out, nil +} + +type ReplicateMigrationState string + +const ( + ReplicateMigrationStateACTIONREQUIRED ReplicateMigrationState = "ACTION_REQUIRED" + ReplicateMigrationStateCOMPLETE ReplicateMigrationState = "COMPLETE" + ReplicateMigrationStateFAILED ReplicateMigrationState = "FAILED" + ReplicateMigrationStatePENDING ReplicateMigrationState = "PENDING" + ReplicateMigrationStateUNDEFINED ReplicateMigrationState = "UNDEFINED" + ReplicateMigrationStateVALIDATING ReplicateMigrationState = "VALIDATING" +) + +func PossibleValuesForReplicateMigrationState() 
[]string { + return []string{ + string(ReplicateMigrationStateACTIONREQUIRED), + string(ReplicateMigrationStateCOMPLETE), + string(ReplicateMigrationStateFAILED), + string(ReplicateMigrationStatePENDING), + string(ReplicateMigrationStateUNDEFINED), + string(ReplicateMigrationStateVALIDATING), + } +} + +func (s *ReplicateMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseReplicateMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicateMigrationState(input string) (*ReplicateMigrationState, error) { + vals := map[string]ReplicateMigrationState{ + "action_required": ReplicateMigrationStateACTIONREQUIRED, + "complete": ReplicateMigrationStateCOMPLETE, + "failed": ReplicateMigrationStateFAILED, + "pending": ReplicateMigrationStatePENDING, + "undefined": ReplicateMigrationStateUNDEFINED, + "validating": ReplicateMigrationStateVALIDATING, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicateMigrationState(input) + return &out, nil +} + +type ResourceSkuCapacityScaleType string + +const ( + ResourceSkuCapacityScaleTypeAutomatic ResourceSkuCapacityScaleType = "Automatic" + ResourceSkuCapacityScaleTypeManual ResourceSkuCapacityScaleType = "Manual" + ResourceSkuCapacityScaleTypeNone ResourceSkuCapacityScaleType = "None" +) + +func PossibleValuesForResourceSkuCapacityScaleType() []string { + return []string{ + string(ResourceSkuCapacityScaleTypeAutomatic), + string(ResourceSkuCapacityScaleTypeManual), + string(ResourceSkuCapacityScaleTypeNone), + } +} + +func (s *ResourceSkuCapacityScaleType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return 
fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResourceSkuCapacityScaleType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResourceSkuCapacityScaleType(input string) (*ResourceSkuCapacityScaleType, error) { + vals := map[string]ResourceSkuCapacityScaleType{ + "automatic": ResourceSkuCapacityScaleTypeAutomatic, + "manual": ResourceSkuCapacityScaleTypeManual, + "none": ResourceSkuCapacityScaleTypeNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResourceSkuCapacityScaleType(input) + return &out, nil +} + +type ResourceSkuRestrictionsReasonCode string + +const ( + ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription ResourceSkuRestrictionsReasonCode = "NotAvailableForSubscription" + ResourceSkuRestrictionsReasonCodeQuotaId ResourceSkuRestrictionsReasonCode = "QuotaId" +) + +func PossibleValuesForResourceSkuRestrictionsReasonCode() []string { + return []string{ + string(ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription), + string(ResourceSkuRestrictionsReasonCodeQuotaId), + } +} + +func (s *ResourceSkuRestrictionsReasonCode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResourceSkuRestrictionsReasonCode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResourceSkuRestrictionsReasonCode(input string) (*ResourceSkuRestrictionsReasonCode, error) { + vals := map[string]ResourceSkuRestrictionsReasonCode{ + "notavailableforsubscription": ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription, + "quotaid": ResourceSkuRestrictionsReasonCodeQuotaId, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume 
it's an undefined value and best-effort it + out := ResourceSkuRestrictionsReasonCode(input) + return &out, nil +} + +type ResourceSkuRestrictionsType string + +const ( + ResourceSkuRestrictionsTypeLocation ResourceSkuRestrictionsType = "location" +) + +func PossibleValuesForResourceSkuRestrictionsType() []string { + return []string{ + string(ResourceSkuRestrictionsTypeLocation), + } +} + +func (s *ResourceSkuRestrictionsType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResourceSkuRestrictionsType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResourceSkuRestrictionsType(input string) (*ResourceSkuRestrictionsType, error) { + vals := map[string]ResourceSkuRestrictionsType{ + "location": ResourceSkuRestrictionsTypeLocation, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResourceSkuRestrictionsType(input) + return &out, nil +} + +type ResultType string + +const ( + ResultTypeCollection ResultType = "Collection" + ResultTypeDatabase ResultType = "Database" + ResultTypeMigration ResultType = "Migration" +) + +func PossibleValuesForResultType() []string { + return []string{ + string(ResultTypeCollection), + string(ResultTypeDatabase), + string(ResultTypeMigration), + } +} + +func (s *ResultType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResultType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResultType(input string) (*ResultType, error) { + vals := map[string]ResultType{ + "collection": ResultTypeCollection, + "database": 
ResultTypeDatabase, + "migration": ResultTypeMigration, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResultType(input) + return &out, nil +} + +type ScenarioSource string + +const ( + ScenarioSourceAccess ScenarioSource = "Access" + ScenarioSourceDBTwo ScenarioSource = "DB2" + ScenarioSourceMongoDB ScenarioSource = "MongoDB" + ScenarioSourceMySQL ScenarioSource = "MySQL" + ScenarioSourceMySQLRDS ScenarioSource = "MySQLRDS" + ScenarioSourceOracle ScenarioSource = "Oracle" + ScenarioSourcePostgreSQL ScenarioSource = "PostgreSQL" + ScenarioSourcePostgreSQLRDS ScenarioSource = "PostgreSQLRDS" + ScenarioSourceSQL ScenarioSource = "SQL" + ScenarioSourceSQLRDS ScenarioSource = "SQLRDS" + ScenarioSourceSybase ScenarioSource = "Sybase" +) + +func PossibleValuesForScenarioSource() []string { + return []string{ + string(ScenarioSourceAccess), + string(ScenarioSourceDBTwo), + string(ScenarioSourceMongoDB), + string(ScenarioSourceMySQL), + string(ScenarioSourceMySQLRDS), + string(ScenarioSourceOracle), + string(ScenarioSourcePostgreSQL), + string(ScenarioSourcePostgreSQLRDS), + string(ScenarioSourceSQL), + string(ScenarioSourceSQLRDS), + string(ScenarioSourceSybase), + } +} + +func (s *ScenarioSource) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioSource(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioSource(input string) (*ScenarioSource, error) { + vals := map[string]ScenarioSource{ + "access": ScenarioSourceAccess, + "db2": ScenarioSourceDBTwo, + "mongodb": ScenarioSourceMongoDB, + "mysql": ScenarioSourceMySQL, + "mysqlrds": ScenarioSourceMySQLRDS, + "oracle": ScenarioSourceOracle, + "postgresql": ScenarioSourcePostgreSQL, + "postgresqlrds": 
ScenarioSourcePostgreSQLRDS, + "sql": ScenarioSourceSQL, + "sqlrds": ScenarioSourceSQLRDS, + "sybase": ScenarioSourceSybase, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioSource(input) + return &out, nil +} + +type ScenarioTarget string + +const ( + ScenarioTargetAzureDBForMySql ScenarioTarget = "AzureDBForMySql" + ScenarioTargetAzureDBForPostgresSQL ScenarioTarget = "AzureDBForPostgresSQL" + ScenarioTargetMongoDB ScenarioTarget = "MongoDB" + ScenarioTargetSQLDB ScenarioTarget = "SQLDB" + ScenarioTargetSQLDW ScenarioTarget = "SQLDW" + ScenarioTargetSQLMI ScenarioTarget = "SQLMI" + ScenarioTargetSQLServer ScenarioTarget = "SQLServer" +) + +func PossibleValuesForScenarioTarget() []string { + return []string{ + string(ScenarioTargetAzureDBForMySql), + string(ScenarioTargetAzureDBForPostgresSQL), + string(ScenarioTargetMongoDB), + string(ScenarioTargetSQLDB), + string(ScenarioTargetSQLDW), + string(ScenarioTargetSQLMI), + string(ScenarioTargetSQLServer), + } +} + +func (s *ScenarioTarget) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioTarget(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioTarget(input string) (*ScenarioTarget, error) { + vals := map[string]ScenarioTarget{ + "azuredbformysql": ScenarioTargetAzureDBForMySql, + "azuredbforpostgressql": ScenarioTargetAzureDBForPostgresSQL, + "mongodb": ScenarioTargetMongoDB, + "sqldb": ScenarioTargetSQLDB, + "sqldw": ScenarioTargetSQLDW, + "sqlmi": ScenarioTargetSQLMI, + "sqlserver": ScenarioTargetSQLServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioTarget(input) + return 
&out, nil +} + +type ServerLevelPermissionsGroup string + +const ( + ServerLevelPermissionsGroupDefault ServerLevelPermissionsGroup = "Default" + ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL ServerLevelPermissionsGroup = "MigrationFromMySQLToAzureDBForMySQL" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureDB" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureMI" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureVM" +) + +func PossibleValuesForServerLevelPermissionsGroup() []string { + return []string{ + string(ServerLevelPermissionsGroupDefault), + string(ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM), + } +} + +func (s *ServerLevelPermissionsGroup) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServerLevelPermissionsGroup(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServerLevelPermissionsGroup(input string) (*ServerLevelPermissionsGroup, error) { + vals := map[string]ServerLevelPermissionsGroup{ + "default": ServerLevelPermissionsGroupDefault, + "migrationfrommysqltoazuredbformysql": ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL, + "migrationfromsqlservertoazuredb": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB, + "migrationfromsqlservertoazuremi": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI, + "migrationfromsqlservertoazurevm": 
ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServerLevelPermissionsGroup(input) + return &out, nil +} + +type ServiceProvisioningState string + +const ( + ServiceProvisioningStateAccepted ServiceProvisioningState = "Accepted" + ServiceProvisioningStateDeleting ServiceProvisioningState = "Deleting" + ServiceProvisioningStateDeploying ServiceProvisioningState = "Deploying" + ServiceProvisioningStateFailed ServiceProvisioningState = "Failed" + ServiceProvisioningStateFailedToStart ServiceProvisioningState = "FailedToStart" + ServiceProvisioningStateFailedToStop ServiceProvisioningState = "FailedToStop" + ServiceProvisioningStateStarting ServiceProvisioningState = "Starting" + ServiceProvisioningStateStopped ServiceProvisioningState = "Stopped" + ServiceProvisioningStateStopping ServiceProvisioningState = "Stopping" + ServiceProvisioningStateSucceeded ServiceProvisioningState = "Succeeded" +) + +func PossibleValuesForServiceProvisioningState() []string { + return []string{ + string(ServiceProvisioningStateAccepted), + string(ServiceProvisioningStateDeleting), + string(ServiceProvisioningStateDeploying), + string(ServiceProvisioningStateFailed), + string(ServiceProvisioningStateFailedToStart), + string(ServiceProvisioningStateFailedToStop), + string(ServiceProvisioningStateStarting), + string(ServiceProvisioningStateStopped), + string(ServiceProvisioningStateStopping), + string(ServiceProvisioningStateSucceeded), + } +} + +func (s *ServiceProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServiceProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func 
parseServiceProvisioningState(input string) (*ServiceProvisioningState, error) { + vals := map[string]ServiceProvisioningState{ + "accepted": ServiceProvisioningStateAccepted, + "deleting": ServiceProvisioningStateDeleting, + "deploying": ServiceProvisioningStateDeploying, + "failed": ServiceProvisioningStateFailed, + "failedtostart": ServiceProvisioningStateFailedToStart, + "failedtostop": ServiceProvisioningStateFailedToStop, + "starting": ServiceProvisioningStateStarting, + "stopped": ServiceProvisioningStateStopped, + "stopping": ServiceProvisioningStateStopping, + "succeeded": ServiceProvisioningStateSucceeded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServiceProvisioningState(input) + return &out, nil +} + +type ServiceScalability string + +const ( + ServiceScalabilityAutomatic ServiceScalability = "automatic" + ServiceScalabilityManual ServiceScalability = "manual" + ServiceScalabilityNone ServiceScalability = "none" +) + +func PossibleValuesForServiceScalability() []string { + return []string{ + string(ServiceScalabilityAutomatic), + string(ServiceScalabilityManual), + string(ServiceScalabilityNone), + } +} + +func (s *ServiceScalability) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServiceScalability(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServiceScalability(input string) (*ServiceScalability, error) { + vals := map[string]ServiceScalability{ + "automatic": ServiceScalabilityAutomatic, + "manual": ServiceScalabilityManual, + "none": ServiceScalabilityNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServiceScalability(input) 
// Severity classifies a reported migration message.
type Severity string

const (
	SeverityError   Severity = "Error"
	SeverityMessage Severity = "Message"
	SeverityWarning Severity = "Warning"
)

// PossibleValuesForSeverity returns every known Severity value as a string,
// in declaration order.
func PossibleValuesForSeverity() []string {
	return []string{
		string(SeverityError),
		string(SeverityMessage),
		string(SeverityWarning),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising casing onto a known
// constant where possible.
func (s *Severity) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseSeverity(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseSeverity matches input case-insensitively; unknown values are kept
// as-is for forwards compatibility.
func parseSeverity(input string) (*Severity, error) {
	normalised := strings.ToLower(input)
	for _, known := range PossibleValuesForSeverity() {
		if strings.ToLower(known) == normalised {
			v := Severity(known)
			return &v, nil
		}
	}

	// otherwise presume it's an undefined value and best-effort it
	v := Severity(input)
	return &v, nil
}

// SqlSourcePlatform identifies the platform a SQL migration source runs on.
type SqlSourcePlatform string

const (
	SqlSourcePlatformSqlOnPrem SqlSourcePlatform = "SqlOnPrem"
)

// PossibleValuesForSqlSourcePlatform returns every known SqlSourcePlatform
// value as a string.
func PossibleValuesForSqlSourcePlatform() []string {
	return []string{
		string(SqlSourcePlatformSqlOnPrem),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising casing onto a known
// constant where possible.
func (s *SqlSourcePlatform) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseSqlSourcePlatform(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseSqlSourcePlatform matches input case-insensitively; unknown values are
// kept as-is for forwards compatibility.
func parseSqlSourcePlatform(input string) (*SqlSourcePlatform, error) {
	normalised := strings.ToLower(input)
	for _, known := range PossibleValuesForSqlSourcePlatform() {
		if strings.ToLower(known) == normalised {
			v := SqlSourcePlatform(known)
			return &v, nil
		}
	}

	// otherwise presume it's an undefined value and best-effort it
	v := SqlSourcePlatform(input)
	return &v, nil
}

// SsisMigrationOverwriteOption controls whether existing SSIS objects are
// overwritten during migration.
type SsisMigrationOverwriteOption string

const (
	SsisMigrationOverwriteOptionIgnore    SsisMigrationOverwriteOption = "Ignore"
	SsisMigrationOverwriteOptionOverwrite SsisMigrationOverwriteOption = "Overwrite"
)

// PossibleValuesForSsisMigrationOverwriteOption returns every known
// SsisMigrationOverwriteOption value as a string, in declaration order.
func PossibleValuesForSsisMigrationOverwriteOption() []string {
	return []string{
		string(SsisMigrationOverwriteOptionIgnore),
		string(SsisMigrationOverwriteOptionOverwrite),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising casing onto a known
// constant where possible.
func (s *SsisMigrationOverwriteOption) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseSsisMigrationOverwriteOption(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseSsisMigrationOverwriteOption matches input case-insensitively; unknown
// values are kept as-is for forwards compatibility.
func parseSsisMigrationOverwriteOption(input string) (*SsisMigrationOverwriteOption, error) {
	normalised := strings.ToLower(input)
	for _, known := range PossibleValuesForSsisMigrationOverwriteOption() {
		if strings.ToLower(known) == normalised {
			v := SsisMigrationOverwriteOption(known)
			return &v, nil
		}
	}

	// otherwise presume it's an undefined value and best-effort it
	v := SsisMigrationOverwriteOption(input)
	return &v, nil
}

// SsisMigrationStage reports how far an SSIS migration has progressed.
type SsisMigrationStage string

const (
	SsisMigrationStageCompleted  SsisMigrationStage = "Completed"
	SsisMigrationStageInProgress SsisMigrationStage = "InProgress"
	SsisMigrationStageInitialize SsisMigrationStage = "Initialize"
	SsisMigrationStageNone       SsisMigrationStage = "None"
)

// PossibleValuesForSsisMigrationStage returns every known SsisMigrationStage
// value as a string, in declaration order.
func PossibleValuesForSsisMigrationStage() []string {
	return []string{
		string(SsisMigrationStageCompleted),
		string(SsisMigrationStageInProgress),
		string(SsisMigrationStageInitialize),
		string(SsisMigrationStageNone),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising casing onto a known
// constant where possible.
func (s *SsisMigrationStage) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseSsisMigrationStage(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseSsisMigrationStage matches input case-insensitively; unknown values
// are kept as-is for forwards compatibility.
func parseSsisMigrationStage(input string) (*SsisMigrationStage, error) {
	normalised := strings.ToLower(input)
	for _, known := range PossibleValuesForSsisMigrationStage() {
		if strings.ToLower(known) == normalised {
			v := SsisMigrationStage(known)
			return &v, nil
		}
	}

	// otherwise presume it's an undefined value and best-effort it
	v := SsisMigrationStage(input)
	return &v, nil
}
// SsisStoreType identifies where SSIS packages are stored.
type SsisStoreType string

const (
	SsisStoreTypeSsisCatalog SsisStoreType = "SsisCatalog"
)

// PossibleValuesForSsisStoreType returns every known SsisStoreType value as a
// string.
func PossibleValuesForSsisStoreType() []string {
	return []string{
		string(SsisStoreTypeSsisCatalog),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising casing onto a known
// constant where possible.
func (s *SsisStoreType) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseSsisStoreType(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseSsisStoreType matches input case-insensitively; unknown values are
// kept as-is for forwards compatibility.
func parseSsisStoreType(input string) (*SsisStoreType, error) {
	normalised := strings.ToLower(input)
	for _, known := range PossibleValuesForSsisStoreType() {
		if strings.ToLower(known) == normalised {
			v := SsisStoreType(known)
			return &v, nil
		}
	}

	// otherwise presume it's an undefined value and best-effort it
	v := SsisStoreType(input)
	return &v, nil
}

// SyncDatabaseMigrationReportingState reports the state of an online (sync)
// database migration.
type SyncDatabaseMigrationReportingState string

const (
	SyncDatabaseMigrationReportingStateBACKUPCOMPLETED   SyncDatabaseMigrationReportingState = "BACKUP_COMPLETED"
	SyncDatabaseMigrationReportingStateBACKUPINPROGRESS  SyncDatabaseMigrationReportingState = "BACKUP_IN_PROGRESS"
	SyncDatabaseMigrationReportingStateCANCELLED         SyncDatabaseMigrationReportingState = "CANCELLED"
	SyncDatabaseMigrationReportingStateCANCELLING        SyncDatabaseMigrationReportingState = "CANCELLING"
	SyncDatabaseMigrationReportingStateCOMPLETE          SyncDatabaseMigrationReportingState = "COMPLETE"
	SyncDatabaseMigrationReportingStateCOMPLETING        SyncDatabaseMigrationReportingState = "COMPLETING"
	SyncDatabaseMigrationReportingStateCONFIGURING       SyncDatabaseMigrationReportingState = "CONFIGURING"
	SyncDatabaseMigrationReportingStateFAILED            SyncDatabaseMigrationReportingState = "FAILED"
	// sic: "INITIALIAZING" is the literal wire value used by the service — do not "correct" it.
	SyncDatabaseMigrationReportingStateINITIALIAZING     SyncDatabaseMigrationReportingState = "INITIALIAZING"
	SyncDatabaseMigrationReportingStateREADYTOCOMPLETE   SyncDatabaseMigrationReportingState = "READY_TO_COMPLETE"
	SyncDatabaseMigrationReportingStateRESTORECOMPLETED  SyncDatabaseMigrationReportingState = "RESTORE_COMPLETED"
	SyncDatabaseMigrationReportingStateRESTOREINPROGRESS SyncDatabaseMigrationReportingState = "RESTORE_IN_PROGRESS"
	SyncDatabaseMigrationReportingStateRUNNING           SyncDatabaseMigrationReportingState = "RUNNING"
	SyncDatabaseMigrationReportingStateSTARTING          SyncDatabaseMigrationReportingState = "STARTING"
	SyncDatabaseMigrationReportingStateUNDEFINED         SyncDatabaseMigrationReportingState = "UNDEFINED"
	SyncDatabaseMigrationReportingStateVALIDATING        SyncDatabaseMigrationReportingState = "VALIDATING"
	SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE SyncDatabaseMigrationReportingState = "VALIDATION_COMPLETE"
	SyncDatabaseMigrationReportingStateVALIDATIONFAILED  SyncDatabaseMigrationReportingState = "VALIDATION_FAILED"
)

// PossibleValuesForSyncDatabaseMigrationReportingState returns every known
// SyncDatabaseMigrationReportingState value as a string, in declaration order.
func PossibleValuesForSyncDatabaseMigrationReportingState() []string {
	return []string{
		string(SyncDatabaseMigrationReportingStateBACKUPCOMPLETED),
		string(SyncDatabaseMigrationReportingStateBACKUPINPROGRESS),
		string(SyncDatabaseMigrationReportingStateCANCELLED),
		string(SyncDatabaseMigrationReportingStateCANCELLING),
		string(SyncDatabaseMigrationReportingStateCOMPLETE),
		string(SyncDatabaseMigrationReportingStateCOMPLETING),
		string(SyncDatabaseMigrationReportingStateCONFIGURING),
		string(SyncDatabaseMigrationReportingStateFAILED),
		string(SyncDatabaseMigrationReportingStateINITIALIAZING),
		string(SyncDatabaseMigrationReportingStateREADYTOCOMPLETE),
		string(SyncDatabaseMigrationReportingStateRESTORECOMPLETED),
		string(SyncDatabaseMigrationReportingStateRESTOREINPROGRESS),
		string(SyncDatabaseMigrationReportingStateRUNNING),
		string(SyncDatabaseMigrationReportingStateSTARTING),
		string(SyncDatabaseMigrationReportingStateUNDEFINED),
		string(SyncDatabaseMigrationReportingStateVALIDATING),
		string(SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE),
		string(SyncDatabaseMigrationReportingStateVALIDATIONFAILED),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising casing onto a known
// constant where possible.
func (s *SyncDatabaseMigrationReportingState) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseSyncDatabaseMigrationReportingState(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseSyncDatabaseMigrationReportingState matches input case-insensitively;
// unknown values are kept as-is for forwards compatibility.
func parseSyncDatabaseMigrationReportingState(input string) (*SyncDatabaseMigrationReportingState, error) {
	normalised := strings.ToLower(input)
	for _, known := range PossibleValuesForSyncDatabaseMigrationReportingState() {
		if strings.ToLower(known) == normalised {
			v := SyncDatabaseMigrationReportingState(known)
			return &v, nil
		}
	}

	// otherwise presume it's an undefined value and best-effort it
	v := SyncDatabaseMigrationReportingState(input)
	return &v, nil
}

// SyncTableMigrationState reports the per-table state of a sync migration.
type SyncTableMigrationState string

const (
	SyncTableMigrationStateBEFORELOAD SyncTableMigrationState = "BEFORE_LOAD"
	SyncTableMigrationStateCANCELED   SyncTableMigrationState = "CANCELED"
	SyncTableMigrationStateCOMPLETED  SyncTableMigrationState = "COMPLETED"
	SyncTableMigrationStateERROR      SyncTableMigrationState = "ERROR"
	SyncTableMigrationStateFAILED     SyncTableMigrationState = "FAILED"
	SyncTableMigrationStateFULLLOAD   SyncTableMigrationState = "FULL_LOAD"
)

// PossibleValuesForSyncTableMigrationState returns every known
// SyncTableMigrationState value as a string, in declaration order.
func PossibleValuesForSyncTableMigrationState() []string {
	return []string{
		string(SyncTableMigrationStateBEFORELOAD),
		string(SyncTableMigrationStateCANCELED),
		string(SyncTableMigrationStateCOMPLETED),
		string(SyncTableMigrationStateERROR),
		string(SyncTableMigrationStateFAILED),
		string(SyncTableMigrationStateFULLLOAD),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising casing onto a known
// constant where possible.
func (s *SyncTableMigrationState) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseSyncTableMigrationState(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseSyncTableMigrationState matches input case-insensitively; unknown
// values are kept as-is for forwards compatibility.
func parseSyncTableMigrationState(input string) (*SyncTableMigrationState, error) {
	normalised := strings.ToLower(input)
	for _, known := range PossibleValuesForSyncTableMigrationState() {
		if strings.ToLower(known) == normalised {
			v := SyncTableMigrationState(known)
			return &v, nil
		}
	}

	// otherwise presume it's an undefined value and best-effort it
	v := SyncTableMigrationState(input)
	return &v, nil
}
// TaskState reports the lifecycle state of a Database Migration task.
type TaskState string

const (
	TaskStateCanceled              TaskState = "Canceled"
	TaskStateFailed                TaskState = "Failed"
	TaskStateFailedInputValidation TaskState = "FailedInputValidation"
	TaskStateFaulted               TaskState = "Faulted"
	TaskStateQueued                TaskState = "Queued"
	TaskStateRunning               TaskState = "Running"
	TaskStateSucceeded             TaskState = "Succeeded"
	TaskStateUnknown               TaskState = "Unknown"
)

// PossibleValuesForTaskState returns every known TaskState value as a string,
// in declaration order.
func PossibleValuesForTaskState() []string {
	return []string{
		string(TaskStateCanceled),
		string(TaskStateFailed),
		string(TaskStateFailedInputValidation),
		string(TaskStateFaulted),
		string(TaskStateQueued),
		string(TaskStateRunning),
		string(TaskStateSucceeded),
		string(TaskStateUnknown),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising casing onto a known
// constant where possible.
func (s *TaskState) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseTaskState(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseTaskState matches input case-insensitively; unknown values are kept
// as-is for forwards compatibility.
func parseTaskState(input string) (*TaskState, error) {
	normalised := strings.ToLower(input)
	for _, known := range PossibleValuesForTaskState() {
		if strings.ToLower(known) == normalised {
			v := TaskState(known)
			return &v, nil
		}
	}

	// otherwise presume it's an undefined value and best-effort it
	v := TaskState(input)
	return &v, nil
}

// TaskType discriminates the concrete kind of a Database Migration task.
// ("Point" in the constant names encodes the "." in the wire value.)
type TaskType string

const (
	TaskTypeConnectPointMongoDb                                                    TaskType = "Connect.MongoDb"
	TaskTypeConnectToSourcePointMySql                                              TaskType = "ConnectToSource.MySql"
	TaskTypeConnectToSourcePointOraclePointSync                                    TaskType = "ConnectToSource.Oracle.Sync"
	TaskTypeConnectToSourcePointPostgreSqlPointSync                                TaskType = "ConnectToSource.PostgreSql.Sync"
	TaskTypeConnectToSourcePointSqlServer                                          TaskType = "ConnectToSource.SqlServer"
	TaskTypeConnectToSourcePointSqlServerPointSync                                 TaskType = "ConnectToSource.SqlServer.Sync"
	TaskTypeConnectToTargetPointAzureDbForMySql                                    TaskType = "ConnectToTarget.AzureDbForMySql"
	TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync                      TaskType = "ConnectToTarget.AzureDbForPostgreSql.Sync"
	TaskTypeConnectToTargetPointAzureSqlDbMI                                       TaskType = "ConnectToTarget.AzureSqlDbMI"
	TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS                      TaskType = "ConnectToTarget.AzureSqlDbMI.Sync.LRS"
	TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync           TaskType = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync"
	TaskTypeConnectToTargetPointSqlDb                                              TaskType = "ConnectToTarget.SqlDb"
	TaskTypeConnectToTargetPointSqlDbPointSync                                     TaskType = "ConnectToTarget.SqlDb.Sync"
	TaskTypeGetTDECertificatesPointSql                                             TaskType = "GetTDECertificates.Sql"
	TaskTypeGetUserTablesMySql                                                     TaskType = "GetUserTablesMySql"
	TaskTypeGetUserTablesOracle                                                    TaskType = "GetUserTablesOracle"
	TaskTypeGetUserTablesPointAzureSqlDbPointSync                                  TaskType = "GetUserTables.AzureSqlDb.Sync"
	TaskTypeGetUserTablesPointSql                                                  TaskType = "GetUserTables.Sql"
	TaskTypeGetUserTablesPostgreSql                                                TaskType = "GetUserTablesPostgreSql"
	TaskTypeMigratePointMongoDb                                                    TaskType = "Migrate.MongoDb"
	TaskTypeMigratePointMySqlPointAzureDbForMySql                                  TaskType = "Migrate.MySql.AzureDbForMySql"
	TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync                         TaskType = "Migrate.MySql.AzureDbForMySql.Sync"
	TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync                   TaskType = "Migrate.Oracle.AzureDbForPostgreSql.Sync"
	TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo           TaskType = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2"
	TaskTypeMigratePointSqlServerPointAzureSqlDbMI                                 TaskType = "Migrate.SqlServer.AzureSqlDbMI"
	TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS                TaskType = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS"
	TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync                          TaskType = "Migrate.SqlServer.AzureSqlDb.Sync"
	TaskTypeMigratePointSqlServerPointSqlDb                                        TaskType = "Migrate.SqlServer.SqlDb"
	TaskTypeMigratePointSsis                                                       TaskType = "Migrate.Ssis"
	TaskTypeMigrateSchemaSqlServerSqlDb                                            TaskType = "MigrateSchemaSqlServerSqlDb"
	TaskTypeServicePointCheckPointOCI                                              TaskType = "Service.Check.OCI"
	TaskTypeServicePointInstallPointOCI                                            TaskType = "Service.Install.OCI"
	TaskTypeServicePointUploadPointOCI                                             TaskType = "Service.Upload.OCI"
	TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI                  TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI"
	TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS"
	TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync                TaskType = "ValidateMigrationInput.SqlServer.SqlDb.Sync"
	TaskTypeValidatePointMongoDb                                                   TaskType = "Validate.MongoDb"
	TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync                     TaskType = "Validate.Oracle.AzureDbPostgreSql.Sync"
)

// PossibleValuesForTaskType returns every known TaskType value as a string,
// in declaration order.
func PossibleValuesForTaskType() []string {
	return []string{
		string(TaskTypeConnectPointMongoDb),
		string(TaskTypeConnectToSourcePointMySql),
		string(TaskTypeConnectToSourcePointOraclePointSync),
		string(TaskTypeConnectToSourcePointPostgreSqlPointSync),
		string(TaskTypeConnectToSourcePointSqlServer),
		string(TaskTypeConnectToSourcePointSqlServerPointSync),
		string(TaskTypeConnectToTargetPointAzureDbForMySql),
		string(TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync),
		string(TaskTypeConnectToTargetPointAzureSqlDbMI),
		string(TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS),
		string(TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync),
		string(TaskTypeConnectToTargetPointSqlDb),
		string(TaskTypeConnectToTargetPointSqlDbPointSync),
		string(TaskTypeGetTDECertificatesPointSql),
		string(TaskTypeGetUserTablesMySql),
		string(TaskTypeGetUserTablesOracle),
		string(TaskTypeGetUserTablesPointAzureSqlDbPointSync),
		string(TaskTypeGetUserTablesPointSql),
		string(TaskTypeGetUserTablesPostgreSql),
		string(TaskTypeMigratePointMongoDb),
		string(TaskTypeMigratePointMySqlPointAzureDbForMySql),
		string(TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync),
		string(TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync),
		string(TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo),
		string(TaskTypeMigratePointSqlServerPointAzureSqlDbMI),
		string(TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS),
		string(TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync),
		string(TaskTypeMigratePointSqlServerPointSqlDb),
		string(TaskTypeMigratePointSsis),
		string(TaskTypeMigrateSchemaSqlServerSqlDb),
		string(TaskTypeServicePointCheckPointOCI),
		string(TaskTypeServicePointInstallPointOCI),
		string(TaskTypeServicePointUploadPointOCI),
		string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI),
		string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS),
		string(TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync),
		string(TaskTypeValidatePointMongoDb),
		string(TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising casing onto a known
// constant where possible.
func (s *TaskType) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseTaskType(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseTaskType matches input case-insensitively against the known task
// types; unknown values are kept as-is for forwards compatibility.
func parseTaskType(input string) (*TaskType, error) {
	normalised := strings.ToLower(input)
	for _, known := range PossibleValuesForTaskType() {
		if strings.ToLower(known) == normalised {
			v := TaskType(known)
			return &v, nil
		}
	}

	// otherwise presume it's an undefined value and best-effort it
	v := TaskType(input)
	return &v, nil
}

// UpdateActionType classifies a schema difference detected between source and
// target.
type UpdateActionType string

const (
	UpdateActionTypeAddedOnTarget   UpdateActionType = "AddedOnTarget"
	UpdateActionTypeChangedOnTarget UpdateActionType = "ChangedOnTarget"
	UpdateActionTypeDeletedOnTarget UpdateActionType = "DeletedOnTarget"
)

// PossibleValuesForUpdateActionType returns every known UpdateActionType
// value as a string, in declaration order.
func PossibleValuesForUpdateActionType() []string {
	return []string{
		string(UpdateActionTypeAddedOnTarget),
		string(UpdateActionTypeChangedOnTarget),
		string(UpdateActionTypeDeletedOnTarget),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising casing onto a known
// constant where possible.
func (s *UpdateActionType) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseUpdateActionType(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseUpdateActionType matches input case-insensitively; unknown values are
// kept as-is for forwards compatibility.
func parseUpdateActionType(input string) (*UpdateActionType, error) {
	normalised := strings.ToLower(input)
	for _, known := range PossibleValuesForUpdateActionType() {
		if strings.ToLower(known) == normalised {
			v := UpdateActionType(known)
			return &v, nil
		}
	}

	// otherwise presume it's an undefined value and best-effort it
	v := UpdateActionType(input)
	return &v, nil
}
// ValidationStatus reports the state of a migration validation run.
type ValidationStatus string

const (
	ValidationStatusCompleted           ValidationStatus = "Completed"
	ValidationStatusCompletedWithIssues ValidationStatus = "CompletedWithIssues"
	ValidationStatusDefault             ValidationStatus = "Default"
	ValidationStatusFailed              ValidationStatus = "Failed"
	ValidationStatusInProgress          ValidationStatus = "InProgress"
	ValidationStatusInitialized         ValidationStatus = "Initialized"
	ValidationStatusNotStarted          ValidationStatus = "NotStarted"
	ValidationStatusStopped             ValidationStatus = "Stopped"
)

// PossibleValuesForValidationStatus returns every known ValidationStatus
// value as a string, in declaration order.
func PossibleValuesForValidationStatus() []string {
	return []string{
		string(ValidationStatusCompleted),
		string(ValidationStatusCompletedWithIssues),
		string(ValidationStatusDefault),
		string(ValidationStatusFailed),
		string(ValidationStatusInProgress),
		string(ValidationStatusInitialized),
		string(ValidationStatusNotStarted),
		string(ValidationStatusStopped),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising casing onto a known
// constant where possible.
func (s *ValidationStatus) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseValidationStatus(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseValidationStatus matches input case-insensitively; unknown values are
// kept as-is for forwards compatibility.
func parseValidationStatus(input string) (*ValidationStatus, error) {
	normalised := strings.ToLower(input)
	for _, known := range PossibleValuesForValidationStatus() {
		if strings.ToLower(known) == normalised {
			v := ValidationStatus(known)
			return &v, nil
		}
	}

	// otherwise presume it's an undefined value and best-effort it
	v := ValidationStatus(input)
	return &v, nil
}
ValidationStatusCompleted, + "completedwithissues": ValidationStatusCompletedWithIssues, + "default": ValidationStatusDefault, + "failed": ValidationStatusFailed, + "inprogress": ValidationStatusInProgress, + "initialized": ValidationStatusInitialized, + "notstarted": ValidationStatusNotStarted, + "stopped": ValidationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ValidationStatus(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/id_file.go b/resource-manager/datamigration/2025-06-30/get/id_file.go new file mode 100644 index 00000000000..2452146b04e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_file.go @@ -0,0 +1,148 @@ +package get + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
func init() {
	// Register this ID type so recaser can normalise the casing of File IDs
	// found in API responses.
	recaser.RegisterResourceId(&FileId{})
}

// Compile-time assertion that FileId satisfies the ResourceId contract.
var _ resourceids.ResourceId = &FileId{}

// FileId is a struct representing the Resource ID for a File
type FileId struct {
	SubscriptionId    string
	ResourceGroupName string
	ServiceName       string
	ProjectName       string
	FileName          string
}

// NewFileID returns a new FileId struct populated from the given segment values.
func NewFileID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, fileName string) FileId {
	return FileId{
		SubscriptionId:    subscriptionId,
		ResourceGroupName: resourceGroupName,
		ServiceName:       serviceName,
		ProjectName:       projectName,
		FileName:          fileName,
	}
}

// ParseFileID parses 'input' into a FileId (case-sensitively; see
// ParseFileIDInsensitively for API response data).
func ParseFileID(input string) (*FileId, error) {
	parser := resourceids.NewParserFromResourceIdType(&FileId{})
	parsed, err := parser.Parse(input, false)
	if err != nil {
		return nil, fmt.Errorf("parsing %q: %+v", input, err)
	}

	id := FileId{}
	if err = id.FromParseResult(*parsed); err != nil {
		return nil, err
	}

	return &id, nil
}

// ParseFileIDInsensitively parses 'input' case-insensitively into a FileId
// note: this method should only be used for API response data and not user input
func ParseFileIDInsensitively(input string) (*FileId, error) {
	parser := resourceids.NewParserFromResourceIdType(&FileId{})
	parsed, err := parser.Parse(input, true)
	if err != nil {
		return nil, fmt.Errorf("parsing %q: %+v", input, err)
	}

	id := FileId{}
	if err = id.FromParseResult(*parsed); err != nil {
		return nil, err
	}

	return &id, nil
}

// FromParseResult copies each expected segment out of 'input' into 'id',
// returning an error for the first segment that is missing.
func (id *FileId) FromParseResult(input resourceids.ParseResult) error {
	var ok bool

	if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input)
	}

	if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input)
	}

	if id.ServiceName, ok = input.Parsed["serviceName"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input)
	}

	if id.ProjectName, ok = input.Parsed["projectName"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input)
	}

	if id.FileName, ok = input.Parsed["fileName"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "fileName", input)
	}

	return nil
}

// ValidateFileID checks that 'input' can be parsed as a File ID
func ValidateFileID(input interface{}, key string) (warnings []string, errors []error) {
	v, ok := input.(string)
	if !ok {
		errors = append(errors, fmt.Errorf("expected %q to be a string", key))
		return
	}

	if _, err := ParseFileID(v); err != nil {
		errors = append(errors, err)
	}

	return
}

// ID returns the formatted File ID
func (id FileId) ID() string {
	fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/files/%s"
	return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.FileName)
}

// Segments returns a slice of Resource ID Segments which comprise this File ID
func (id FileId) Segments() []resourceids.Segment {
	return []resourceids.Segment{
		resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"),
		resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"),
		resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"),
		resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"),
		resourceids.StaticSegment("staticProviders", "providers", "providers"),
		resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"),
		resourceids.StaticSegment("staticServices", "services", "services"),
		resourceids.UserSpecifiedSegment("serviceName", "serviceName"),
resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticFiles", "files", "files"), + resourceids.UserSpecifiedSegment("fileName", "fileName"), + } +} + +// String returns a human-readable description of this File ID +func (id FileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("File Name: %q", id.FileName), + } + return fmt.Sprintf("File (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/get/id_file_test.go b/resource-manager/datamigration/2025-06-30/get/id_file_test.go new file mode 100644 index 00000000000..4770c4fed7d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_file_test.go @@ -0,0 +1,372 @@ +package get + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &FileId{} + +func TestNewFileID(t *testing.T) { + id := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.FileName != "fileName" { + t.Fatalf("Expected %q but got %q for Segment 'FileName'", id.FileName, "fileName") + } +} + +func TestFormatFileID(t *testing.T) { + actual := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseFileID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestParseFileIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + FileName: "fIlEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestSegmentsForFileId(t *testing.T) { + segments := FileId{}.Segments() + if len(segments) == 0 { + t.Fatalf("FileId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/get/id_location.go b/resource-manager/datamigration/2025-06-30/get/id_location.go new file mode 100644 index 00000000000..ab70e2e0fa9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_location.go @@ -0,0 +1,121 @@ +package get + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&LocationId{}) +} + +var _ resourceids.ResourceId = &LocationId{} + +// LocationId is a struct representing the Resource ID for a Location +type LocationId struct { + SubscriptionId string + LocationName string +} + +// NewLocationID returns a new LocationId struct +func NewLocationID(subscriptionId string, locationName string) LocationId { + return LocationId{ + SubscriptionId: subscriptionId, + LocationName: locationName, + } +} + +// ParseLocationID parses 'input' into a LocationId +func ParseLocationID(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseLocationIDInsensitively parses 'input' case-insensitively into a LocationId +// note: this method should only be used for API response data and not user input +func ParseLocationIDInsensitively(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *LocationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.LocationName, ok = input.Parsed["locationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "locationName", input) + } + + return nil +} + +// ValidateLocationID checks that 'input' can be parsed as a Location ID +func ValidateLocationID(input interface{}, key string) 
(warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseLocationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Location ID +func (id LocationId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.DataMigration/locations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.LocationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Location ID +func (id LocationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticLocations", "locations", "locations"), + resourceids.UserSpecifiedSegment("locationName", "locationName"), + } +} + +// String returns a human-readable description of this Location ID +func (id LocationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location Name: %q", id.LocationName), + } + return fmt.Sprintf("Location (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/get/id_location_test.go b/resource-manager/datamigration/2025-06-30/get/id_location_test.go new file mode 100644 index 00000000000..b7f8e4c25dc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_location_test.go @@ -0,0 +1,237 @@ +package get + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &LocationId{} + +func TestNewLocationID(t *testing.T) { + id := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.LocationName != "locationName" { + t.Fatalf("Expected %q but got %q for Segment 'LocationName'", id.LocationName, "locationName") + } +} + +func TestFormatLocationID(t *testing.T) { + actual := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseLocationID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: 
"locationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestParseLocationIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/lOcAtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/lOcAtIoNs/lOcAtIoNnAmE", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "lOcAtIoNnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/lOcAtIoNs/lOcAtIoNnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + 
t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestSegmentsForLocationId(t *testing.T) { + segments := LocationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("LocationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/get/id_project.go b/resource-manager/datamigration/2025-06-30/get/id_project.go new file mode 100644 index 00000000000..878ba8e4c36 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_project.go @@ -0,0 +1,139 @@ +package get + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ProjectId{}) +} + +var _ resourceids.ResourceId = &ProjectId{} + +// ProjectId is a struct representing the Resource ID for a Project +type ProjectId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string +} + +// NewProjectID returns a new ProjectId struct +func NewProjectID(subscriptionId string, resourceGroupName string, serviceName string, projectName string) ProjectId { + return ProjectId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + } +} + +// ParseProjectID parses 'input' into a ProjectId +func ParseProjectID(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseProjectIDInsensitively parses 'input' case-insensitively into a ProjectId +// note: this method should only be used for API response data and not user input +func ParseProjectIDInsensitively(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ProjectId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + 
if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + return nil +} + +// ValidateProjectID checks that 'input' can be parsed as a Project ID +func ValidateProjectID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseProjectID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Project ID +func (id ProjectId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Project ID +func (id ProjectId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + } +} + +// String 
returns a human-readable description of this Project ID +func (id ProjectId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + } + return fmt.Sprintf("Project (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/get/id_project_test.go b/resource-manager/datamigration/2025-06-30/get/id_project_test.go new file mode 100644 index 00000000000..4c8ac6f6a0b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_project_test.go @@ -0,0 +1,327 @@ +package get + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &ProjectId{} + +func TestNewProjectID(t *testing.T) { + id := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } +} + +func TestFormatProjectID(t *testing.T) { + actual := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", 
"projectName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseProjectID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestParseProjectIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + 
Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestSegmentsForProjectId(t *testing.T) { + segments := ProjectId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ProjectId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/get/id_service.go b/resource-manager/datamigration/2025-06-30/get/id_service.go new file mode 100644 index 00000000000..8c6bf2fcad1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_service.go @@ -0,0 +1,130 @@ +package get + +import ( + "fmt" + "strings" + + 
"github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&ServiceId{}) +} + +var _ resourceids.ResourceId = &ServiceId{} + +// ServiceId is a struct representing the Resource ID for a Service +type ServiceId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string +} + +// NewServiceID returns a new ServiceId struct +func NewServiceID(subscriptionId string, resourceGroupName string, serviceName string) ServiceId { + return ServiceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + } +} + +// ParseServiceID parses 'input' into a ServiceId +func ParseServiceID(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceIDInsensitively parses 'input' case-insensitively into a ServiceId +// note: this method should only be used for API response data and not user input +func ParseServiceIDInsensitively(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + return nil +} + +// ValidateServiceID checks that 'input' can be parsed as a Service ID +func ValidateServiceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service ID +func (id ServiceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service ID +func (id ServiceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + } +} + +// String returns a human-readable description of this Service ID +func (id 
ServiceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + } + return fmt.Sprintf("Service (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/get/id_service_test.go b/resource-manager/datamigration/2025-06-30/get/id_service_test.go new file mode 100644 index 00000000000..3efb76b7aec --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_service_test.go @@ -0,0 +1,282 @@ +package get + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &ServiceId{} + +func TestNewServiceID(t *testing.T) { + id := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", "12345678-1234-9876-4563-123456789012", id.SubscriptionId) + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", "resourceGroupName", id.ResourceGroupName) + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", "serviceName", id.ServiceName) + } +} + +func TestFormatServiceID(t *testing.T) { + actual := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func 
TestParseServiceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if 
actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestParseServiceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestSegmentsForServiceId(t *testing.T) { + segments := ServiceId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/get/id_servicetask.go b/resource-manager/datamigration/2025-06-30/get/id_servicetask.go new file mode 100644 index 00000000000..0a837a71c78 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_servicetask.go @@ -0,0 +1,139 @@ +package get + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&ServiceTaskId{}) +} + +var _ resourceids.ResourceId = &ServiceTaskId{} + +// ServiceTaskId is a struct representing the Resource ID for a Service Task +type ServiceTaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ServiceTaskName string +} + +// NewServiceTaskID returns a new ServiceTaskId struct +func NewServiceTaskID(subscriptionId string, resourceGroupName string, serviceName string, serviceTaskName string) ServiceTaskId { + return ServiceTaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ServiceTaskName: serviceTaskName, + } +} + +// ParseServiceTaskID parses 'input' into a ServiceTaskId +func ParseServiceTaskID(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceTaskIDInsensitively parses 'input' case-insensitively into a ServiceTaskId +// note: this method should only be used for API response data and not user input +func ParseServiceTaskIDInsensitively(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceTaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ServiceTaskName, ok = input.Parsed["serviceTaskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceTaskName", input) + } + + return nil +} + +// ValidateServiceTaskID checks that 'input' can be parsed as a Service Task ID +func ValidateServiceTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service Task ID +func (id ServiceTaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/serviceTasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ServiceTaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service Task ID +func (id ServiceTaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + 
resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticServiceTasks", "serviceTasks", "serviceTasks"), + resourceids.UserSpecifiedSegment("serviceTaskName", "serviceTaskName"), + } +} + +// String returns a human-readable description of this Service Task ID +func (id ServiceTaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Service Task Name: %q", id.ServiceTaskName), + } + return fmt.Sprintf("Service Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/get/id_servicetask_test.go b/resource-manager/datamigration/2025-06-30/get/id_servicetask_test.go new file mode 100644 index 00000000000..aa194349780 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_servicetask_test.go @@ -0,0 +1,327 @@ +package get + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &ServiceTaskId{} + +func TestNewServiceTaskID(t *testing.T) { + id := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", "12345678-1234-9876-4563-123456789012", id.SubscriptionId) + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", "resourceGroupName", id.ResourceGroupName) + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", "serviceName", id.ServiceName) + } + + if id.ServiceTaskName != "serviceTaskName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceTaskName'", "serviceTaskName", id.ServiceTaskName) + } +} + +func TestFormatServiceTaskID(t *testing.T) { + actual := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseServiceTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // 
Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", 
v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestParseServiceTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI 
(mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ServiceTaskName: "sErViCeTaSkNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", 
v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestSegmentsForServiceTaskId(t *testing.T) { + segments := ServiceTaskId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceTaskId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/get/id_subscriptionresourcegroup.go b/resource-manager/datamigration/2025-06-30/get/id_subscriptionresourcegroup.go new file mode 100644 index 00000000000..0f4f534d149 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_subscriptionresourcegroup.go @@ -0,0 +1,119 @@ +package get + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&SubscriptionResourceGroupId{}) +} + +var _ resourceids.ResourceId = &SubscriptionResourceGroupId{} + +// SubscriptionResourceGroupId is a struct representing the Resource ID for a Subscription Resource Group +type SubscriptionResourceGroupId struct { + SubscriptionId string + ResourceGroupName string +} + +// NewSubscriptionResourceGroupID returns a new SubscriptionResourceGroupId struct +func NewSubscriptionResourceGroupID(subscriptionId string, resourceGroupName string) SubscriptionResourceGroupId { + return SubscriptionResourceGroupId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + } +} + +// ParseSubscriptionResourceGroupID parses 'input' into a SubscriptionResourceGroupId +func ParseSubscriptionResourceGroupID(input string) (*SubscriptionResourceGroupId, error) { + parser := resourceids.NewParserFromResourceIdType(&SubscriptionResourceGroupId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SubscriptionResourceGroupId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseSubscriptionResourceGroupIDInsensitively parses 'input' case-insensitively into a SubscriptionResourceGroupId +// note: this method should only be used for API response data and not user input +func ParseSubscriptionResourceGroupIDInsensitively(input string) (*SubscriptionResourceGroupId, error) { + parser := resourceids.NewParserFromResourceIdType(&SubscriptionResourceGroupId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SubscriptionResourceGroupId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *SubscriptionResourceGroupId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = 
input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + return nil +} + +// ValidateSubscriptionResourceGroupID checks that 'input' can be parsed as a Subscription Resource Group ID +func ValidateSubscriptionResourceGroupID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseSubscriptionResourceGroupID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Subscription Resource Group ID +func (id SubscriptionResourceGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Subscription Resource Group ID +func (id SubscriptionResourceGroupId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + } +} + +// String returns a human-readable description of this Subscription Resource Group ID +func (id SubscriptionResourceGroupId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + } + return fmt.Sprintf("Subscription Resource Group (%s)", strings.Join(components, "\n")) +} diff --git 
a/resource-manager/datamigration/2025-06-30/get/id_subscriptionresourcegroup_test.go b/resource-manager/datamigration/2025-06-30/get/id_subscriptionresourcegroup_test.go new file mode 100644 index 00000000000..24e3176d189 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_subscriptionresourcegroup_test.go @@ -0,0 +1,207 @@ +package get + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &SubscriptionResourceGroupId{} + +func TestNewSubscriptionResourceGroupID(t *testing.T) { + id := NewSubscriptionResourceGroupID("12345678-1234-9876-4563-123456789012", "resourceGroupName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } +} + +func TestFormatSubscriptionResourceGroupID(t *testing.T) { + actual := NewSubscriptionResourceGroupID("12345678-1234-9876-4563-123456789012", "resourceGroupName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseSubscriptionResourceGroupID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SubscriptionResourceGroupId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // 
Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Expected: &SubscriptionResourceGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSubscriptionResourceGroupID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + } +} + +func TestParseSubscriptionResourceGroupIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SubscriptionResourceGroupId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + 
Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Expected: &SubscriptionResourceGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Expected: &SubscriptionResourceGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSubscriptionResourceGroupIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + } +} + +func TestSegmentsForSubscriptionResourceGroupId(t *testing.T) { + segments := SubscriptionResourceGroupId{}.Segments() + if len(segments) == 0 { + 
t.Fatalf("SubscriptionResourceGroupId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/get/id_task.go b/resource-manager/datamigration/2025-06-30/get/id_task.go new file mode 100644 index 00000000000..fee837d3d29 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_task.go @@ -0,0 +1,148 @@ +package get + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&TaskId{}) +} + +var _ resourceids.ResourceId = &TaskId{} + +// TaskId is a struct representing the Resource ID for a Task +type TaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + TaskName string +} + +// NewTaskID returns a new TaskId struct +func NewTaskID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, taskName string) TaskId { + return TaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + TaskName: taskName, + } +} + +// ParseTaskID parses 'input' into a TaskId +func ParseTaskID(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil 
{ + return nil, err + } + + return &id, nil +} + +// ParseTaskIDInsensitively parses 'input' case-insensitively into a TaskId +// note: this method should only be used for API response data and not user input +func ParseTaskIDInsensitively(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *TaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.TaskName, ok = input.Parsed["taskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "taskName", input) + } + + return nil +} + +// ValidateTaskID checks that 'input' can be parsed as a Task ID +func ValidateTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Task ID +func (id TaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/tasks/%s" + return 
fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.TaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Task ID +func (id TaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticTasks", "tasks", "tasks"), + resourceids.UserSpecifiedSegment("taskName", "taskName"), + } +} + +// String returns a human-readable description of this Task ID +func (id TaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("Task Name: %q", id.TaskName), + } + return fmt.Sprintf("Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/get/id_task_test.go b/resource-manager/datamigration/2025-06-30/get/id_task_test.go new file mode 100644 index 00000000000..8e28ca4487a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/id_task_test.go @@ -0,0 +1,372 @@ +package get 
+ +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &TaskId{} + +func TestNewTaskID(t *testing.T) { + id := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.TaskName != "taskName" { + t.Fatalf("Expected %q but got %q for Segment 'TaskName'", id.TaskName, "taskName") + } +} + +func TestFormatTaskID(t *testing.T) { + actual := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.TaskName != v.Expected.TaskName { + t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName) + } + + } +} + +func TestParseTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe 
since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + TaskName: "tAsKnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for 
ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName)
+	}
+
+	if actual.ServiceName != v.Expected.ServiceName {
+		t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName)
+	}
+
+	if actual.ProjectName != v.Expected.ProjectName {
+		t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName)
+	}
+
+	if actual.TaskName != v.Expected.TaskName {
+		t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName)
+	}
+
+	}
+}
+
+func TestSegmentsForTaskId(t *testing.T) {
+	segments := TaskId{}.Segments()
+	if len(segments) == 0 {
+		t.Fatalf("TaskId has no segments")
+	}
+
+	uniqueNames := make(map[string]struct{})
+	for _, segment := range segments {
+		uniqueNames[segment.Name] = struct{}{}
+	}
+	if len(uniqueNames) != len(segments) {
+		t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments))
+	}
+}
diff --git a/resource-manager/datamigration/2025-06-30/get/method_filesget.go b/resource-manager/datamigration/2025-06-30/get/method_filesget.go
new file mode 100644
index 00000000000..0cbffa3ea3d
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/get/method_filesget.go
@@ -0,0 +1,53 @@
+package get
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/hashicorp/go-azure-sdk/sdk/client"
+	"github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type FilesGetOperationResponse struct {
+	HttpResponse *http.Response
+	OData        *odata.OData
+	Model        *ProjectFile
+}
+
+// FilesGet ...
+func (c GETClient) FilesGet(ctx context.Context, id FileId) (result FilesGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectFile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/get/method_fileslist.go b/resource-manager/datamigration/2025-06-30/get/method_fileslist.go new file mode 100644 index 00000000000..b84c2ef218c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/method_fileslist.go @@ -0,0 +1,105 @@ +package get + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]ProjectFile +} + +type FilesListCompleteResult struct { + LatestHttpResponse *http.Response + Items []ProjectFile +} + +type FilesListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *FilesListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// FilesList ... 
+func (c GETClient) FilesList(ctx context.Context, id ProjectId) (result FilesListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &FilesListCustomPager{}, + Path: fmt.Sprintf("%s/files", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]ProjectFile `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// FilesListComplete retrieves all the results into a single object +func (c GETClient) FilesListComplete(ctx context.Context, id ProjectId) (FilesListCompleteResult, error) { + return c.FilesListCompleteMatchingPredicate(ctx, id, ProjectFileOperationPredicate{}) +} + +// FilesListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c GETClient) FilesListCompleteMatchingPredicate(ctx context.Context, id ProjectId, predicate ProjectFileOperationPredicate) (result FilesListCompleteResult, err error) { + items := make([]ProjectFile, 0) + + resp, err := c.FilesList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = FilesListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/get/method_projectsget.go b/resource-manager/datamigration/2025-06-30/get/method_projectsget.go new file mode 100644 index 00000000000..8632137438f --- /dev/null 
+++ b/resource-manager/datamigration/2025-06-30/get/method_projectsget.go @@ -0,0 +1,53 @@ +package get + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectsGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *Project +} + +// ProjectsGet ... +func (c GETClient) ProjectsGet(ctx context.Context, id ProjectId) (result ProjectsGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model Project + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/get/method_projectslist.go b/resource-manager/datamigration/2025-06-30/get/method_projectslist.go new file mode 100644 index 00000000000..eadf0aef05a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/method_projectslist.go @@ -0,0 +1,105 @@ +package get + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ProjectsListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]Project +} + +type ProjectsListCompleteResult struct { + LatestHttpResponse *http.Response + Items []Project +} + +type ProjectsListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ProjectsListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ProjectsList ... +func (c GETClient) ProjectsList(ctx context.Context, id ServiceId) (result ProjectsListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ProjectsListCustomPager{}, + Path: fmt.Sprintf("%s/projects", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]Project `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ProjectsListComplete retrieves all the results into a single object +func (c GETClient) ProjectsListComplete(ctx context.Context, id ServiceId) (ProjectsListCompleteResult, error) { + return c.ProjectsListCompleteMatchingPredicate(ctx, id, ProjectOperationPredicate{}) +} + +// ProjectsListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c GETClient) ProjectsListCompleteMatchingPredicate(ctx context.Context, id ServiceId, predicate ProjectOperationPredicate) (result ProjectsListCompleteResult, err error) { + items := make([]Project, 0) + + resp, err := c.ProjectsList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading 
results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ProjectsListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/get/method_resourceskuslistskus.go b/resource-manager/datamigration/2025-06-30/get/method_resourceskuslistskus.go new file mode 100644 index 00000000000..54e35b690c5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/method_resourceskuslistskus.go @@ -0,0 +1,106 @@ +package get + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResourceSkusListSkusOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]ResourceSku +} + +type ResourceSkusListSkusCompleteResult struct { + LatestHttpResponse *http.Response + Items []ResourceSku +} + +type ResourceSkusListSkusCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ResourceSkusListSkusCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ResourceSkusListSkus ... 
+func (c GETClient) ResourceSkusListSkus(ctx context.Context, id commonids.SubscriptionId) (result ResourceSkusListSkusOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ResourceSkusListSkusCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/skus", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]ResourceSku `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ResourceSkusListSkusComplete retrieves all the results into a single object +func (c GETClient) ResourceSkusListSkusComplete(ctx context.Context, id commonids.SubscriptionId) (ResourceSkusListSkusCompleteResult, error) { + return c.ResourceSkusListSkusCompleteMatchingPredicate(ctx, id, ResourceSkuOperationPredicate{}) +} + +// ResourceSkusListSkusCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c GETClient) ResourceSkusListSkusCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate ResourceSkuOperationPredicate) (result ResourceSkusListSkusCompleteResult, err error) { + items := make([]ResourceSku, 0) + + resp, err := c.ResourceSkusListSkus(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ResourceSkusListSkusCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff 
--git a/resource-manager/datamigration/2025-06-30/get/method_servicesget.go b/resource-manager/datamigration/2025-06-30/get/method_servicesget.go new file mode 100644 index 00000000000..4edea98b206 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/method_servicesget.go @@ -0,0 +1,53 @@ +package get + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DataMigrationService +} + +// ServicesGet ... +func (c GETClient) ServicesGet(ctx context.Context, id ServiceId) (result ServicesGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DataMigrationService + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/get/method_serviceslist.go b/resource-manager/datamigration/2025-06-30/get/method_serviceslist.go new file mode 100644 index 00000000000..bdacf585a16 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/method_serviceslist.go @@ -0,0 +1,106 @@ +package get + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]DataMigrationService +} + +type ServicesListCompleteResult struct { + LatestHttpResponse *http.Response + Items []DataMigrationService +} + +type ServicesListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ServicesListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ServicesList ... +func (c GETClient) ServicesList(ctx context.Context, id commonids.SubscriptionId) (result ServicesListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ServicesListCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/services", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]DataMigrationService `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ServicesListComplete retrieves all the results into a single object +func (c GETClient) ServicesListComplete(ctx context.Context, id commonids.SubscriptionId) (ServicesListCompleteResult, error) { + return c.ServicesListCompleteMatchingPredicate(ctx, id, DataMigrationServiceOperationPredicate{}) +} + +// ServicesListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c GETClient) 
ServicesListCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate DataMigrationServiceOperationPredicate) (result ServicesListCompleteResult, err error) { + items := make([]DataMigrationService, 0) + + resp, err := c.ServicesList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ServicesListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/get/method_serviceslistbyresourcegroup.go b/resource-manager/datamigration/2025-06-30/get/method_serviceslistbyresourcegroup.go new file mode 100644 index 00000000000..dcaf09f4eb6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/method_serviceslistbyresourcegroup.go @@ -0,0 +1,105 @@ +package get + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]DataMigrationService +} + +type ServicesListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []DataMigrationService +} + +type ServicesListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ServicesListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ServicesListByResourceGroup ... 
+func (c GETClient) ServicesListByResourceGroup(ctx context.Context, id SubscriptionResourceGroupId) (result ServicesListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ServicesListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/services", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]DataMigrationService `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ServicesListByResourceGroupComplete retrieves all the results into a single object +func (c GETClient) ServicesListByResourceGroupComplete(ctx context.Context, id SubscriptionResourceGroupId) (ServicesListByResourceGroupCompleteResult, error) { + return c.ServicesListByResourceGroupCompleteMatchingPredicate(ctx, id, DataMigrationServiceOperationPredicate{}) +} + +// ServicesListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c GETClient) ServicesListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id SubscriptionResourceGroupId, predicate DataMigrationServiceOperationPredicate) (result ServicesListByResourceGroupCompleteResult, err error) { + items := make([]DataMigrationService, 0) + + resp, err := c.ServicesListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + 
result = ServicesListByResourceGroupCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/get/method_serviceslistskus.go b/resource-manager/datamigration/2025-06-30/get/method_serviceslistskus.go new file mode 100644 index 00000000000..169a4ae112a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/method_serviceslistskus.go @@ -0,0 +1,105 @@ +package get + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesListSkusOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]AvailableServiceSku +} + +type ServicesListSkusCompleteResult struct { + LatestHttpResponse *http.Response + Items []AvailableServiceSku +} + +type ServicesListSkusCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ServicesListSkusCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ServicesListSkus ... 
+func (c GETClient) ServicesListSkus(ctx context.Context, id ServiceId) (result ServicesListSkusOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ServicesListSkusCustomPager{}, + Path: fmt.Sprintf("%s/skus", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]AvailableServiceSku `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ServicesListSkusComplete retrieves all the results into a single object +func (c GETClient) ServicesListSkusComplete(ctx context.Context, id ServiceId) (ServicesListSkusCompleteResult, error) { + return c.ServicesListSkusCompleteMatchingPredicate(ctx, id, AvailableServiceSkuOperationPredicate{}) +} + +// ServicesListSkusCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c GETClient) ServicesListSkusCompleteMatchingPredicate(ctx context.Context, id ServiceId, predicate AvailableServiceSkuOperationPredicate) (result ServicesListSkusCompleteResult, err error) { + items := make([]AvailableServiceSku, 0) + + resp, err := c.ServicesListSkus(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ServicesListSkusCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/get/method_servicetasksget.go 
b/resource-manager/datamigration/2025-06-30/get/method_servicetasksget.go new file mode 100644 index 00000000000..ee8fe70e6e4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/method_servicetasksget.go @@ -0,0 +1,83 @@ +package get + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServiceTasksGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +type ServiceTasksGetOperationOptions struct { + Expand *string +} + +func DefaultServiceTasksGetOperationOptions() ServiceTasksGetOperationOptions { + return ServiceTasksGetOperationOptions{} +} + +func (o ServiceTasksGetOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ServiceTasksGetOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ServiceTasksGetOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Expand != nil { + out.Append("$expand", fmt.Sprintf("%v", *o.Expand)) + } + return &out +} + +// ServiceTasksGet ... 
+func (c GETClient) ServiceTasksGet(ctx context.Context, id ServiceTaskId, options ServiceTasksGetOperationOptions) (result ServiceTasksGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/get/method_servicetaskslist.go b/resource-manager/datamigration/2025-06-30/get/method_servicetaskslist.go new file mode 100644 index 00000000000..7b8dbcade99 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/method_servicetaskslist.go @@ -0,0 +1,134 @@ +package get + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServiceTasksListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]ProjectTask +} + +type ServiceTasksListCompleteResult struct { + LatestHttpResponse *http.Response + Items []ProjectTask +} + +type ServiceTasksListOperationOptions struct { + TaskType *string +} + +func DefaultServiceTasksListOperationOptions() ServiceTasksListOperationOptions { + return ServiceTasksListOperationOptions{} +} + +func (o ServiceTasksListOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ServiceTasksListOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ServiceTasksListOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.TaskType != nil { + out.Append("taskType", fmt.Sprintf("%v", *o.TaskType)) + } + return &out +} + +type ServiceTasksListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ServiceTasksListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ServiceTasksList ... 
+func (c GETClient) ServiceTasksList(ctx context.Context, id ServiceId, options ServiceTasksListOperationOptions) (result ServiceTasksListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Pager: &ServiceTasksListCustomPager{}, + Path: fmt.Sprintf("%s/serviceTasks", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]ProjectTask `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ServiceTasksListComplete retrieves all the results into a single object +func (c GETClient) ServiceTasksListComplete(ctx context.Context, id ServiceId, options ServiceTasksListOperationOptions) (ServiceTasksListCompleteResult, error) { + return c.ServiceTasksListCompleteMatchingPredicate(ctx, id, options, ProjectTaskOperationPredicate{}) +} + +// ServiceTasksListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c GETClient) ServiceTasksListCompleteMatchingPredicate(ctx context.Context, id ServiceId, options ServiceTasksListOperationOptions, predicate ProjectTaskOperationPredicate) (result ServiceTasksListCompleteResult, err error) { + items := make([]ProjectTask, 0) + + resp, err := c.ServiceTasksList(ctx, id, options) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ServiceTasksListCompleteResult{ + LatestHttpResponse: 
resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/get/method_tasksget.go b/resource-manager/datamigration/2025-06-30/get/method_tasksget.go new file mode 100644 index 00000000000..22dc283d8ad --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/method_tasksget.go @@ -0,0 +1,83 @@ +package get + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +type TasksGetOperationOptions struct { + Expand *string +} + +func DefaultTasksGetOperationOptions() TasksGetOperationOptions { + return TasksGetOperationOptions{} +} + +func (o TasksGetOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o TasksGetOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o TasksGetOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Expand != nil { + out.Append("$expand", fmt.Sprintf("%v", *o.Expand)) + } + return &out +} + +// TasksGet ... 
+func (c GETClient) TasksGet(ctx context.Context, id TaskId, options TasksGetOperationOptions) (result TasksGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/get/method_taskslist.go b/resource-manager/datamigration/2025-06-30/get/method_taskslist.go new file mode 100644 index 00000000000..e00e68d1653 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/method_taskslist.go @@ -0,0 +1,134 @@ +package get + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type TasksListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]ProjectTask +} + +type TasksListCompleteResult struct { + LatestHttpResponse *http.Response + Items []ProjectTask +} + +type TasksListOperationOptions struct { + TaskType *string +} + +func DefaultTasksListOperationOptions() TasksListOperationOptions { + return TasksListOperationOptions{} +} + +func (o TasksListOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o TasksListOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o TasksListOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.TaskType != nil { + out.Append("taskType", fmt.Sprintf("%v", *o.TaskType)) + } + return &out +} + +type TasksListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *TasksListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// TasksList ... 
+func (c GETClient) TasksList(ctx context.Context, id ProjectId, options TasksListOperationOptions) (result TasksListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Pager: &TasksListCustomPager{}, + Path: fmt.Sprintf("%s/tasks", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]ProjectTask `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// TasksListComplete retrieves all the results into a single object +func (c GETClient) TasksListComplete(ctx context.Context, id ProjectId, options TasksListOperationOptions) (TasksListCompleteResult, error) { + return c.TasksListCompleteMatchingPredicate(ctx, id, options, ProjectTaskOperationPredicate{}) +} + +// TasksListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c GETClient) TasksListCompleteMatchingPredicate(ctx context.Context, id ProjectId, options TasksListOperationOptions, predicate ProjectTaskOperationPredicate) (result TasksListCompleteResult, err error) { + items := make([]ProjectTask, 0) + + resp, err := c.TasksList(ctx, id, options) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = TasksListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git 
a/resource-manager/datamigration/2025-06-30/get/method_usageslist.go b/resource-manager/datamigration/2025-06-30/get/method_usageslist.go new file mode 100644 index 00000000000..3e4635ad4d8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/method_usageslist.go @@ -0,0 +1,105 @@ +package get + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UsagesListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]Quota +} + +type UsagesListCompleteResult struct { + LatestHttpResponse *http.Response + Items []Quota +} + +type UsagesListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *UsagesListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// UsagesList ... 
+func (c GETClient) UsagesList(ctx context.Context, id LocationId) (result UsagesListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &UsagesListCustomPager{}, + Path: fmt.Sprintf("%s/usages", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]Quota `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// UsagesListComplete retrieves all the results into a single object +func (c GETClient) UsagesListComplete(ctx context.Context, id LocationId) (UsagesListCompleteResult, error) { + return c.UsagesListCompleteMatchingPredicate(ctx, id, QuotaOperationPredicate{}) +} + +// UsagesListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c GETClient) UsagesListCompleteMatchingPredicate(ctx context.Context, id LocationId, predicate QuotaOperationPredicate) (result UsagesListCompleteResult, err error) { + items := make([]Quota, 0) + + resp, err := c.UsagesList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = UsagesListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_availableservicesku.go b/resource-manager/datamigration/2025-06-30/get/model_availableservicesku.go new file mode 100644 index 00000000000..96fa82da078 --- 
/dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_availableservicesku.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AvailableServiceSku struct { + Capacity *AvailableServiceSkuCapacity `json:"capacity,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` + Sku *AvailableServiceSkuSku `json:"sku,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_availableserviceskucapacity.go b/resource-manager/datamigration/2025-06-30/get/model_availableserviceskucapacity.go new file mode 100644 index 00000000000..e194210af96 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_availableserviceskucapacity.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AvailableServiceSkuCapacity struct { + Default *int64 `json:"default,omitempty"` + Maximum *int64 `json:"maximum,omitempty"` + Minimum *int64 `json:"minimum,omitempty"` + ScaleType *ServiceScalability `json:"scaleType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_availableserviceskusku.go b/resource-manager/datamigration/2025-06-30/get/model_availableserviceskusku.go new file mode 100644 index 00000000000..c0b29979754 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_availableserviceskusku.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AvailableServiceSkuSku struct { + Family *string `json:"family,omitempty"` + Name *string `json:"name,omitempty"` + Size *string `json:"size,omitempty"` + Tier *string `json:"tier,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_azureactivedirectoryapp.go b/resource-manager/datamigration/2025-06-30/get/model_azureactivedirectoryapp.go new file mode 100644 index 00000000000..7b858ab5754 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_azureactivedirectoryapp.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AzureActiveDirectoryApp struct { + AppKey *string `json:"appKey,omitempty"` + ApplicationId *string `json:"applicationId,omitempty"` + IgnoreAzurePermissions *bool `json:"ignoreAzurePermissions,omitempty"` + TenantId *string `json:"tenantId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_backupfileinfo.go b/resource-manager/datamigration/2025-06-30/get/model_backupfileinfo.go new file mode 100644 index 00000000000..3df26127c30 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_backupfileinfo.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BackupFileInfo struct { + FamilySequenceNumber *int64 `json:"familySequenceNumber,omitempty"` + FileLocation *string `json:"fileLocation,omitempty"` + Status *BackupFileStatus `json:"status,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_backupsetinfo.go b/resource-manager/datamigration/2025-06-30/get/model_backupsetinfo.go new file mode 100644 index 00000000000..bd399c22a81 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_backupsetinfo.go @@ -0,0 +1,59 @@ +package get + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupSetInfo struct { + BackupFinishedDate *string `json:"backupFinishedDate,omitempty"` + BackupSetId *string `json:"backupSetId,omitempty"` + BackupStartDate *string `json:"backupStartDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FirstLsn *string `json:"firstLsn,omitempty"` + IsBackupRestored *bool `json:"isBackupRestored,omitempty"` + LastLsn *string `json:"lastLsn,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + ListOfBackupFiles *[]BackupFileInfo `json:"listOfBackupFiles,omitempty"` +} + +func (o *BackupSetInfo) GetBackupFinishedDateAsTime() (*time.Time, error) { + if o.BackupFinishedDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishedDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupFinishedDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishedDate = &formatted +} + +func (o *BackupSetInfo) GetBackupStartDateAsTime() (*time.Time, error) { + if o.BackupStartDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupStartDate, "2006-01-02T15:04:05Z07:00") +} + +func 
(o *BackupSetInfo) SetBackupStartDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupStartDate = &formatted +} + +func (o *BackupSetInfo) GetLastModifiedTimeAsTime() (*time.Time, error) { + if o.LastModifiedTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastModifiedTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetLastModifiedTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModifiedTime = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_blobshare.go b/resource-manager/datamigration/2025-06-30/get/model_blobshare.go new file mode 100644 index 00000000000..1c7782bedaa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_blobshare.go @@ -0,0 +1,8 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BlobShare struct { + SasUri *string `json:"sasUri,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_commandproperties.go b/resource-manager/datamigration/2025-06-30/get/model_commandproperties.go new file mode 100644 index 00000000000..1c200783c13 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_commandproperties.go @@ -0,0 +1,85 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CommandProperties interface { + CommandProperties() BaseCommandPropertiesImpl +} + +var _ CommandProperties = BaseCommandPropertiesImpl{} + +type BaseCommandPropertiesImpl struct { + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s BaseCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s +} + +var _ CommandProperties = RawCommandPropertiesImpl{} + +// RawCommandPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawCommandPropertiesImpl struct { + commandProperties BaseCommandPropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s.commandProperties +} + +func UnmarshalCommandPropertiesImplementation(input []byte) (CommandProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling CommandProperties into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["commandType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureDbSqlMi.Complete") { + var out MigrateMISyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMISyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Sync.Complete.Database") { + var out MigrateSyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + var parent BaseCommandPropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseCommandPropertiesImpl: %+v", err) + } + + return RawCommandPropertiesImpl{ + commandProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connectioninfo.go b/resource-manager/datamigration/2025-06-30/get/model_connectioninfo.go new file mode 100644 index 00000000000..17449325861 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connectioninfo.go @@ -0,0 +1,117 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectionInfo interface { + ConnectionInfo() BaseConnectionInfoImpl +} + +var _ ConnectionInfo = BaseConnectionInfoImpl{} + +type BaseConnectionInfoImpl struct { + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s BaseConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s +} + +var _ ConnectionInfo = RawConnectionInfoImpl{} + +// RawConnectionInfoImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawConnectionInfoImpl struct { + connectionInfo BaseConnectionInfoImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s.connectionInfo +} + +func UnmarshalConnectionInfoImplementation(input []byte) (ConnectionInfo, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectionInfo into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["type"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MiSqlConnectionInfo") { + var out MiSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MiSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "mongoDbConnectionInfo") { + var out MongoDbConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MySqlConnectionInfo") { + var out MySqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MySqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "OracleConnectionInfo") { + var out OracleConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into OracleConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "PostgreSqlConnectionInfo") { + var out PostgreSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into PostgreSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SqlConnectionInfo") { + var out SqlConnectionInfo + if err := json.Unmarshal(input, &out); 
err != nil { + return nil, fmt.Errorf("unmarshaling into SqlConnectionInfo: %+v", err) + } + return out, nil + } + + var parent BaseConnectionInfoImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectionInfoImpl: %+v", err) + } + + return RawConnectionInfoImpl{ + connectionInfo: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttomongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_connecttomongodbtaskproperties.go new file mode 100644 index 00000000000..b88e566cf12 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttomongodbtaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToMongoDbTaskProperties{} + +type ConnectToMongoDbTaskProperties struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToMongoDbTaskProperties{} + +func (s ConnectToMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Connect.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToMongoDbTaskProperties{} + +func (s *ConnectToMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToMongoDbTaskProperties': %+v", i, err) + } + output = 
append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcemysqltaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcemysqltaskinput.go new file mode 100644 index 00000000000..3b8ea048862 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcemysqltaskinput.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceMySqlTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetPlatform *MySqlTargetPlatformType `json:"targetPlatform,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcemysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcemysqltaskproperties.go new file mode 100644 index 00000000000..9653d175148 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcemysqltaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToSourceMySqlTaskProperties{} + +type ConnectToSourceMySqlTaskProperties struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceMySqlTaskProperties{} + +func (s ConnectToSourceMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.MySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceMySqlTaskProperties{} + +func (s *ConnectToSourceMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError 
`json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcenonsqltaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcenonsqltaskoutput.go new file mode 100644 index 00000000000..407cc650e74 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcenonsqltaskoutput.go @@ -0,0 +1,12 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceNonSqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerProperties *ServerProperties `json:"serverProperties,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourceoraclesynctaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourceoraclesynctaskinput.go new file mode 100644 index 00000000000..ce9cd02a769 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourceoraclesynctaskinput.go @@ -0,0 +1,8 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceOracleSyncTaskInput struct { + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourceoraclesynctaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourceoraclesynctaskoutput.go new file mode 100644 index 00000000000..668067f52b4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourceoraclesynctaskoutput.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceOracleSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourceoraclesynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourceoraclesynctaskproperties.go new file mode 100644 index 00000000000..03666594374 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourceoraclesynctaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceOracleSyncTaskProperties{} + +type ConnectToSourceOracleSyncTaskProperties struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceOracleSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceOracleSyncTaskProperties{} + +func (s ConnectToSourceOracleSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceOracleSyncTaskProperties + 
wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.Oracle.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceOracleSyncTaskProperties{} + +func (s *ConnectToSourceOracleSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { 
+ return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceOracleSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcepostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcepostgresqlsynctaskinput.go new file mode 100644 index 00000000000..1fc0ca23f09 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcepostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourcePostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcepostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcepostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..56d507f1aaa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcepostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourcePostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcepostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcepostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..02550f51cdc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcepostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourcePostgreSqlSyncTaskProperties{} + +type ConnectToSourcePostgreSqlSyncTaskProperties struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) 
MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourcePostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.PostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s *ConnectToSourcePostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := 
make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourcePostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlserversynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlserversynctaskproperties.go new file mode 100644 index 00000000000..8f15be008de --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlserversynctaskproperties.go @@ -0,0 +1,121 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerSyncTaskProperties{} + +type ConnectToSourceSqlServerSyncTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerSyncTaskProperties{} + +func (s ConnectToSourceSqlServerSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper 
ConnectToSourceSqlServerSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.SqlServer.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerSyncTaskProperties{} + +func (s *ConnectToSourceSqlServerSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling 
index %d field 'Commands' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskinput.go new file mode 100644 index 00000000000..0bdaa9de28c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskinput.go @@ -0,0 +1,15 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceSqlServerTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectDatabases *bool `json:"collectDatabases,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + CollectTdeCertificateInfo *bool `json:"collectTdeCertificateInfo,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutput.go new file mode 100644 index 00000000000..dd441654587 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutput.go @@ -0,0 +1,100 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceSqlServerTaskOutput interface { + ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl +} + +var _ ConnectToSourceSqlServerTaskOutput = BaseConnectToSourceSqlServerTaskOutputImpl{} + +type BaseConnectToSourceSqlServerTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s +} + +var _ ConnectToSourceSqlServerTaskOutput = RawConnectToSourceSqlServerTaskOutputImpl{} + +// RawConnectToSourceSqlServerTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectToSourceSqlServerTaskOutputImpl struct { + connectToSourceSqlServerTaskOutput BaseConnectToSourceSqlServerTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s.connectToSourceSqlServerTaskOutput +} + +func UnmarshalConnectToSourceSqlServerTaskOutputImplementation(input []byte) (ConnectToSourceSqlServerTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TaskLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputTaskLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + return out, nil + } + + var parent BaseConnectToSourceSqlServerTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectToSourceSqlServerTaskOutputImpl: %+v", err) + } + + return RawConnectToSourceSqlServerTaskOutputImpl{ + connectToSourceSqlServerTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutputagentjoblevel.go new file mode 100644 index 00000000000..674e52eaf98 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +type ConnectToSourceSqlServerTaskOutputAgentJobLevel struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + JobCategory *string `json:"jobCategory,omitempty"` + JobOwner *string `json:"jobOwner,omitempty"` + LastExecutedOn *string `json:"lastExecutedOn,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutputdatabaselevel.go new file mode 100644 index 00000000000..bf8bf0f5edd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutputdatabaselevel.go @@ -0,0 +1,56 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +type ConnectToSourceSqlServerTaskOutputDatabaseLevel struct { + CompatibilityLevel *DatabaseCompatLevel `json:"compatibilityLevel,omitempty"` + DatabaseFiles *[]DatabaseFileInfo `json:"databaseFiles,omitempty"` + DatabaseState *DatabaseState `json:"databaseState,omitempty"` + Name *string `json:"name,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = 
"DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutputloginlevel.go new file mode 100644 index 00000000000..8473b27cbb2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutputloginlevel.go @@ -0,0 +1,56 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +type ConnectToSourceSqlServerTaskOutputLoginLevel struct { + DefaultDatabase *string `json:"defaultDatabase,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + LoginType *LoginType `json:"loginType,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputLoginLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling 
ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutputtasklevel.go b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutputtasklevel.go new file mode 100644 index 00000000000..8c9fa2de18a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskoutputtasklevel.go @@ -0,0 +1,58 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +type ConnectToSourceSqlServerTaskOutputTaskLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + DatabaseTdeCertificateMapping *map[string]string `json:"databaseTdeCertificateMapping,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputTaskLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + decoded["resultType"] = "TaskLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskproperties.go 
b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskproperties.go new file mode 100644 index 00000000000..4ee06e03ddb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttosourcesqlservertaskproperties.go @@ -0,0 +1,124 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerTaskProperties{} + +type ConnectToSourceSqlServerTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskProperties{} + +func (s ConnectToSourceSqlServerTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + decoded["taskType"] = 
"ConnectToSource.SqlServer" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerTaskProperties{} + +func (s *ConnectToSourceSqlServerTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := 
make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbformysqltaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbformysqltaskinput.go new file mode 100644 index 00000000000..37bda42a76e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbformysqltaskinput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForMySqlTaskInput struct { + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbformysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbformysqltaskoutput.go new file mode 100644 index 00000000000..a4cde62d720 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbformysqltaskoutput.go @@ -0,0 +1,12 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForMySqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbformysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbformysqltaskproperties.go new file mode 100644 index 00000000000..2644bb1baf1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbformysqltaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetAzureDbForMySqlTaskProperties{} + +type ConnectToTargetAzureDbForMySqlTaskProperties struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) 
MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s *ConnectToTargetAzureDbForMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := 
make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..2967272813a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..f3c9a5f9289 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..356ece605f0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..170b46627f0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ +package get + +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput struct { + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..08c5a85b9ac --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput struct { + DatabaseSchemaMap *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined `json:"databaseSchemaMap,omitempty"` + Databases *[]string `json:"databases,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go new file mode 100644 index 00000000000..b40ad698ac3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined struct { + Database *string `json:"database,omitempty"` + Schemas *[]string `json:"schemas,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..99b1b835722 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + 
+func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, 
ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqldbtaskinput.go new file mode 100644 index 00000000000..871f1c5a55c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqldbtaskinput.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlDbTaskInput struct { + QueryObjectCounts *bool `json:"queryObjectCounts,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqldbtaskoutput.go new file mode 100644 index 00000000000..d233f43ed77 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqldbtaskoutput.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlDbTaskOutput struct { + Databases *map[string]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqldbtaskproperties.go new file mode 100644 index 00000000000..00e4a59c8f5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqldbtaskproperties.go @@ -0,0 +1,109 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlDbTaskProperties{} + +type ConnectToTargetSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlDbTaskProperties{} + +func (s ConnectToTargetSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlDbTaskProperties + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlDbTaskProperties{} + +func (s *ConnectToTargetSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + 
return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmisynctaskinput.go new file mode 100644 index 00000000000..d74b8fd9a6b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmisynctaskinput.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmisynctaskoutput.go new file mode 100644 index 00000000000..9def55861c1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlMISyncTaskOutput struct { + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmisynctaskproperties.go new file mode 100644 index 00000000000..ca61ecf129b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmisynctaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlMISyncTaskProperties{} + +type ConnectToTargetSqlMISyncTaskProperties struct { + Input *ConnectToTargetSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlMISyncTaskProperties{} + +func (s ConnectToTargetSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + 
if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlMISyncTaskProperties{} + +func (s *ConnectToTargetSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' 
for 'ConnectToTargetSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmitaskinput.go new file mode 100644 index 00000000000..1989a72d573 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmitaskinput.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlMITaskInput struct { + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmitaskoutput.go new file mode 100644 index 00000000000..11208b2136d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmitaskoutput.go @@ -0,0 +1,13 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlMITaskOutput struct { + AgentJobs *[]string `json:"agentJobs,omitempty"` + Id *string `json:"id,omitempty"` + Logins *[]string `json:"logins,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmitaskproperties.go new file mode 100644 index 00000000000..8b1613d19fe --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlmitaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlMITaskProperties{} + +type ConnectToTargetSqlMITaskProperties struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlMITaskProperties{} + +func (s ConnectToTargetSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper 
ConnectToTargetSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlMITaskProperties{} + +func (s *ConnectToTargetSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + 
return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlsqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlsqldbsynctaskinput.go new file mode 100644 index 00000000000..8c67c1fb8c7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlsqldbsynctaskinput.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlSqlDbSyncTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlsqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlsqldbsynctaskproperties.go new file mode 100644 index 00000000000..e5796d2f864 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_connecttotargetsqlsqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +type ConnectToTargetSqlSqlDbSyncTaskProperties struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s *ConnectToTargetSqlSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput 
`json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_databasebackupinfo.go b/resource-manager/datamigration/2025-06-30/get/model_databasebackupinfo.go new file mode 100644 index 00000000000..2523bdeadf6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_databasebackupinfo.go @@ -0,0 +1,33 @@ +package get + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseBackupInfo struct { + BackupFiles *[]string `json:"backupFiles,omitempty"` + BackupFinishDate *string `json:"backupFinishDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FamilyCount *int64 `json:"familyCount,omitempty"` + IsCompressed *bool `json:"isCompressed,omitempty"` + IsDamaged *bool `json:"isDamaged,omitempty"` + Position *int64 `json:"position,omitempty"` +} + +func (o *DatabaseBackupInfo) GetBackupFinishDateAsTime() (*time.Time, error) { + if o.BackupFinishDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseBackupInfo) SetBackupFinishDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishDate = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_databasefileinfo.go b/resource-manager/datamigration/2025-06-30/get/model_databasefileinfo.go new file mode 100644 index 00000000000..dbb87c80ae8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_databasefileinfo.go @@ -0,0 +1,14 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseFileInfo struct { + DatabaseName *string `json:"databaseName,omitempty"` + FileType *DatabaseFileType `json:"fileType,omitempty"` + Id *string `json:"id,omitempty"` + LogicalName *string `json:"logicalName,omitempty"` + PhysicalFullName *string `json:"physicalFullName,omitempty"` + RestoreFullName *string `json:"restoreFullName,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_databaseinfo.go b/resource-manager/datamigration/2025-06-30/get/model_databaseinfo.go new file mode 100644 index 00000000000..9b099d73523 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_databaseinfo.go @@ -0,0 +1,8 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseInfo struct { + SourceDatabaseName string `json:"sourceDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_databasesummaryresult.go b/resource-manager/datamigration/2025-06-30/get/model_databasesummaryresult.go new file mode 100644 index 00000000000..2e3e1b08d11 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_databasesummaryresult.go @@ -0,0 +1,47 @@ +package get + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_databasetable.go b/resource-manager/datamigration/2025-06-30/get/model_databasetable.go new file mode 100644 index 00000000000..7646d8cf0cd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_databasetable.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseTable struct { + HasRows *bool `json:"hasRows,omitempty"` + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_dataintegrityvalidationresult.go b/resource-manager/datamigration/2025-06-30/get/model_dataintegrityvalidationresult.go new file mode 100644 index 00000000000..ab39c33b81b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_dataintegrityvalidationresult.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataIntegrityValidationResult struct { + FailedObjects *map[string]string `json:"failedObjects,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_dataitemmigrationsummaryresult.go b/resource-manager/datamigration/2025-06-30/get/model_dataitemmigrationsummaryresult.go new file mode 100644 index 00000000000..942e2720261 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_dataitemmigrationsummaryresult.go @@ -0,0 +1,46 @@ +package get + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataItemMigrationSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DataItemMigrationSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DataItemMigrationSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_datamigrationservice.go b/resource-manager/datamigration/2025-06-30/get/model_datamigrationservice.go new file mode 100644 index 00000000000..b17b42d9b69 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_datamigrationservice.go @@ -0,0 +1,21 @@ +package get + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataMigrationService struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Kind *string `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DataMigrationServiceProperties `json:"properties,omitempty"` + Sku *ServiceSku `json:"sku,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_datamigrationserviceproperties.go b/resource-manager/datamigration/2025-06-30/get/model_datamigrationserviceproperties.go new file mode 100644 index 00000000000..69bb6ef04c3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_datamigrationserviceproperties.go @@ -0,0 +1,13 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataMigrationServiceProperties struct { + AutoStopDelay *string `json:"autoStopDelay,omitempty"` + DeleteResourcesOnStop *bool `json:"deleteResourcesOnStop,omitempty"` + ProvisioningState *ServiceProvisioningState `json:"provisioningState,omitempty"` + PublicKey *string `json:"publicKey,omitempty"` + VirtualNicId *string `json:"virtualNicId,omitempty"` + VirtualSubnetId *string `json:"virtualSubnetId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_executionstatistics.go b/resource-manager/datamigration/2025-06-30/get/model_executionstatistics.go new file mode 100644 index 00000000000..7432b2ffe30 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_executionstatistics.go @@ -0,0 +1,13 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ExecutionStatistics struct { + CpuTimeMs *float64 `json:"cpuTimeMs,omitempty"` + ElapsedTimeMs *float64 `json:"elapsedTimeMs,omitempty"` + ExecutionCount *int64 `json:"executionCount,omitempty"` + HasErrors *bool `json:"hasErrors,omitempty"` + SqlErrors *[]string `json:"sqlErrors,omitempty"` + WaitStats *map[string]WaitStatistics `json:"waitStats,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_fileshare.go b/resource-manager/datamigration/2025-06-30/get/model_fileshare.go new file mode 100644 index 00000000000..2a845f18148 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_fileshare.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FileShare struct { + Password *string `json:"password,omitempty"` + Path string `json:"path"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_gettdecertificatessqltaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_gettdecertificatessqltaskinput.go new file mode 100644 index 00000000000..c08db1e2c4d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_gettdecertificatessqltaskinput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetTdeCertificatesSqlTaskInput struct { + BackupFileShare FileShare `json:"backupFileShare"` + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + SelectedCertificates []SelectedCertificateInput `json:"selectedCertificates"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_gettdecertificatessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_gettdecertificatessqltaskoutput.go new file mode 100644 index 00000000000..8a67e4d5258 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_gettdecertificatessqltaskoutput.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetTdeCertificatesSqlTaskOutput struct { + Base64EncodedCertificates *map[string][]string `json:"base64EncodedCertificates,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_gettdecertificatessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_gettdecertificatessqltaskproperties.go new file mode 100644 index 00000000000..5bc095ff345 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_gettdecertificatessqltaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetTdeCertificatesSqlTaskProperties{} + +type GetTdeCertificatesSqlTaskProperties struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetTdeCertificatesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetTdeCertificatesSqlTaskProperties{} + +func (s GetTdeCertificatesSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetTdeCertificatesSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetTDECertificates.Sql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetTdeCertificatesSqlTaskProperties{} + +func (s *GetTdeCertificatesSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetTdeCertificatesSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablesmysqltaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablesmysqltaskinput.go new file mode 100644 index 00000000000..8cf5b80a1ab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablesmysqltaskinput.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesMySqlTaskInput struct { + ConnectionInfo MySqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablesmysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablesmysqltaskoutput.go new file mode 100644 index 00000000000..44fd5d3f57d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablesmysqltaskoutput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesMySqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablesmysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablesmysqltaskproperties.go new file mode 100644 index 00000000000..b18a06cf595 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablesmysqltaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesMySqlTaskProperties{} + +type GetUserTablesMySqlTaskProperties struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesMySqlTaskProperties{} + +func (s GetUserTablesMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesMySqlTaskProperties{} + +func (s *GetUserTablesMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State 
*TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablesoracletaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablesoracletaskinput.go new file mode 100644 index 00000000000..6923cb52c7c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablesoracletaskinput.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesOracleTaskInput struct { + ConnectionInfo OracleConnectionInfo `json:"connectionInfo"` + SelectedSchemas []string `json:"selectedSchemas"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablesoracletaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablesoracletaskoutput.go new file mode 100644 index 00000000000..78812a83a6a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablesoracletaskoutput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesOracleTaskOutput struct { + SchemaName *string `json:"schemaName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablesoracletaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablesoracletaskproperties.go new file mode 100644 index 00000000000..011be4b2c21 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablesoracletaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesOracleTaskProperties{} + +type GetUserTablesOracleTaskProperties struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesOracleTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesOracleTaskProperties{} + +func (s GetUserTablesOracleTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesOracleTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesOracle" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesOracleTaskProperties{} + +func (s *GetUserTablesOracleTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError 
`json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesOracleTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablespostgresqltaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablespostgresqltaskinput.go new file mode 100644 index 00000000000..f9358e54f49 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablespostgresqltaskinput.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesPostgreSqlTaskInput struct { + ConnectionInfo PostgreSqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablespostgresqltaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablespostgresqltaskoutput.go new file mode 100644 index 00000000000..6db36f946a4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablespostgresqltaskoutput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesPostgreSqlTaskOutput struct { + DatabaseName *string `json:"databaseName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablespostgresqltaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablespostgresqltaskproperties.go new file mode 100644 index 00000000000..0a660131ab2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablespostgresqltaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesPostgreSqlTaskProperties{} + +type GetUserTablesPostgreSqlTaskProperties struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesPostgreSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesPostgreSqlTaskProperties{} + +func (s GetUserTablesPostgreSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesPostgreSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesPostgreSql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesPostgreSqlTaskProperties{} + +func (s *GetUserTablesPostgreSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string 
`json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesPostgreSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablessqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablessqlsynctaskinput.go new file mode 100644 index 00000000000..207067221f8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablessqlsynctaskinput.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlSyncTaskInput struct { + SelectedSourceDatabases []string `json:"selectedSourceDatabases"` + SelectedTargetDatabases []string `json:"selectedTargetDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablessqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablessqlsynctaskoutput.go new file mode 100644 index 00000000000..31599103639 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablessqlsynctaskoutput.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlSyncTaskOutput struct { + DatabasesToSourceTables *map[string][]DatabaseTable `json:"databasesToSourceTables,omitempty"` + DatabasesToTargetTables *map[string][]DatabaseTable `json:"databasesToTargetTables,omitempty"` + TableValidationErrors *map[string][]string `json:"tableValidationErrors,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablessqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablessqlsynctaskproperties.go new file mode 100644 index 00000000000..d308ef2330d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablessqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlSyncTaskProperties{} + +type GetUserTablesSqlSyncTaskProperties struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlSyncTaskProperties{} + +func (s GetUserTablesSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlSyncTaskProperties{} + +func (s *GetUserTablesSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablessqltaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablessqltaskinput.go new file mode 100644 index 00000000000..cbb8b185dd7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablessqltaskinput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlTaskInput struct { + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablessqltaskoutput.go new file mode 100644 index 00000000000..661031b0495 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablessqltaskoutput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_getusertablessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_getusertablessqltaskproperties.go new file mode 100644 index 00000000000..1b92556b11a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_getusertablessqltaskproperties.go @@ -0,0 +1,109 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlTaskProperties{} + +type GetUserTablesSqlTaskProperties struct { + Input *GetUserTablesSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlTaskProperties{} + +func (s GetUserTablesSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.Sql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlTaskProperties{} + +func (s *GetUserTablesSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + 
Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemisynccompletecommandinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratemisynccompletecommandinput.go new file mode 100644 index 00000000000..83668ff35a5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemisynccompletecommandinput.go @@ -0,0 +1,8 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMISyncCompleteCommandInput struct { + SourceDatabaseName string `json:"sourceDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemisynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/get/model_migratemisynccompletecommandoutput.go new file mode 100644 index 00000000000..5a60db3772f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemisynccompletecommandoutput.go @@ -0,0 +1,8 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMISyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemisynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/get/model_migratemisynccompletecommandproperties.go new file mode 100644 index 00000000000..4d7e3fb1e89 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemisynccompletecommandproperties.go @@ -0,0 +1,55 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ CommandProperties = MigrateMISyncCompleteCommandProperties{} + +type MigrateMISyncCompleteCommandProperties struct { + Input *MigrateMISyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateMISyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateMISyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateMISyncCompleteCommandProperties{} + +func (s MigrateMISyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMISyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.SqlServer.AzureDbSqlMi.Complete" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_migratemongodbtaskproperties.go new file mode 100644 index 00000000000..aa6bb5f2c49 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemongodbtaskproperties.go @@ -0,0 +1,121 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMongoDbTaskProperties{} + +type MigrateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMongoDbTaskProperties{} + +func (s MigrateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMongoDbTaskProperties{} + +func (s *MigrateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + 
TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MongoDbProgress, 0) + for i, val := range listTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinedatabaseinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinedatabaseinput.go new file mode 100644 index 00000000000..ab4492bde80 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinedatabaseinput.go 
@@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlOfflineDatabaseInput struct { + Name *string `json:"name,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskinput.go new file mode 100644 index 00000000000..50c47eda760 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskinput.go @@ -0,0 +1,32 @@ +package get + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + MakeSourceServerReadOnly *bool `json:"makeSourceServerReadOnly,omitempty"` + OptionalAgentSettings *map[string]string `json:"optionalAgentSettings,omitempty"` + SelectedDatabases []MigrateMySqlAzureDbForMySqlOfflineDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutput.go new file mode 100644 index 00000000000..ba292099fff --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutput.go @@ -0,0 +1,100 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskOutput interface { + MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlOfflineTaskOutput BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlOfflineTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlOfflineTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + 
if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + migrateMySqlAzureDbForMySqlOfflineTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go new file mode 100644 index 00000000000..6984499371c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + var decoded 
map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go new file mode 100644 index 00000000000..6a978aa60cb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go @@ -0,0 +1,52 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go new file mode 100644 index 00000000000..4810207bdcc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package get + +import ( + 
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MarshalJSON() ([]byte, 
error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go new file mode 100644 index 00000000000..9d452e13e55 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go @@ -0,0 +1,61 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling 
MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskproperties.go new file mode 100644 index 00000000000..4a157cd6bdb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlofflinetaskproperties.go @@ -0,0 +1,127 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlOfflineTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskProperties + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlOfflineTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range 
listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlOfflineTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsyncdatabaseinput.go new file mode 100644 index 00000000000..f03aaee9574 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsyncdatabaseinput.go @@ -0,0 +1,13 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlSyncDatabaseInput struct { + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskinput.go new file mode 100644 index 00000000000..1ad912aa20b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskinput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlSyncTaskInput struct { + SelectedDatabases []MigrateMySqlAzureDbForMySqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutput.go new file mode 100644 index 00000000000..0fe3e3439e1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlSyncTaskOutput interface { + MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlSyncTaskOutput BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlSyncTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err 
!= nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + migrateMySqlAzureDbForMySqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..50ebb3dcf93 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..da389f30818 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + +func (s 
MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputerror.go new file mode 100644 index 00000000000..a426b4c92de --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..2eec293906f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..311b9ac373a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *string `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *string `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *string `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + 
ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskproperties.go new file mode 100644 index 00000000000..b4e6ea2a5aa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratemysqlazuredbformysqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +type MigrateMySqlAzureDbForMySqlSyncTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput 
`json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..9680f2ab786 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type MigrateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded 
map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, 
ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateOracleAzureDbPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..cf6a1070f53 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateOracleAzureDbPostgreSqlSyncDatabaseInput struct { + CaseManipulation *string `json:"caseManipulation,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..b97c45a8558 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskinput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateOracleAzureDbPostgreSqlSyncTaskInput struct { + SelectedDatabases []MigrateOracleAzureDbPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..de5304a4547 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutput interface { + MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +type BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +// RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + migrateOracleAzureDbPostgreSqlSyncTaskOutput BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s.migrateOracleAzureDbPostgreSqlSyncTaskOutput +} + +func UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(input []byte) (MigrateOracleAzureDbPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + migrateOracleAzureDbPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..f94c48d9305 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..76de557c2f6 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..9ed3cfcc11f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..d9082448f5b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ +package get + +import ( + 
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..28187263a97 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) 
MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..85ae3175ee1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go @@ -0,0 +1,14 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]interface{} `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SelectedTables *[]MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput `json:"selectedTables,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go new file mode 100644 index 00000000000..51c9727d5b9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go @@ -0,0 +1,8 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput struct { + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..03cf5337971 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,30 @@ +package get + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..f5a3b75c8c1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput interface { + MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +type BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +// RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s.migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput +} + +func UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(input []byte) (MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if 
strings.EqualFold(value, "MigrationLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..e4b7025f5be --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..483d35473b4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() 
BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..df65f1a10d9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go @@ -0,0 +1,53 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..823cbc8a7b0 --- 
/dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel struct { + DatabaseCount *float64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerType *ScenarioSource `json:"sourceServerType,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *ReplicateMigrationState `json:"state,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerType *ScenarioTarget `json:"targetServerType,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..349b4c39b1e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..a1b49cd31f8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,130 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, 
nil +} + +var _ json.Unmarshaler = &MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list 
[]json.RawMessage: %+v", err) + } + + output := make([]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbdatabaseinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbdatabaseinput.go new file mode 100644 index 00000000000..37b9ec31d7f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbdatabaseinput.go @@ -0,0 +1,13 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbDatabaseInput struct { + Id *string `json:"id,omitempty"` + MakeSourceDbReadOnly *bool `json:"makeSourceDbReadOnly,omitempty"` + Name *string `json:"name,omitempty"` + SchemaSetting *interface{} `json:"schemaSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsyncdatabaseinput.go new file mode 100644 index 00000000000..ac140fbc3ee --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskinput.go new file mode 100644 index 00000000000..c845d0c09a6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskinput.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutput.go new file mode 100644 index 00000000000..7691aa52be3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutput.go @@ -0,0 +1,108 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncTaskOutput interface { + MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = RawMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + migrateSqlServerSqlDbSyncTaskOutput BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s.migrateSqlServerSqlDbSyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbSyncTaskOutputImpl{ + migrateSqlServerSqlDbSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..b92ba4e1f11 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..df6ac039aa4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package get + +import ( + 
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err 
!= nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputerror.go new file mode 100644 index 00000000000..800fb4830a3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputError{} + +type MigrateSqlServerSqlDbSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..fddc9e7cd28 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go @@ -0,0 +1,58 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputtablelevel.go 
b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..0869f964bb3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) 
MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..adb6c52a0b1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbsynctaskproperties.go @@ -0,0 +1,121 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateSqlServerSqlDbSyncTaskProperties{} + +type MigrateSqlServerSqlDbSyncTaskProperties struct { + Input *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlDbSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskProperties{} + +func (s MigrateSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbSyncTaskProperties{} + +func (s *MigrateSqlServerSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskinput.go new file mode 100644 index 00000000000..e7e6e3641cc 
--- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskinput.go @@ -0,0 +1,13 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigrateSqlServerSqlDbDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutput.go new file mode 100644 index 00000000000..75c2d31f454 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutput.go @@ -0,0 +1,116 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbTaskOutput interface { + MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbTaskOutput = BaseMigrateSqlServerSqlDbTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbTaskOutput = RawMigrateSqlServerSqlDbTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlDbTaskOutputImpl struct { + migrateSqlServerSqlDbTaskOutput BaseMigrateSqlServerSqlDbTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s.migrateSqlServerSqlDbTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "MigrationDatabaseLevelValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbTaskOutputImpl{ + migrateSqlServerSqlDbTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputdatabaselevel.go new file mode 100644 index 00000000000..058d7711172 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputdatabaselevel.go @@ -0,0 +1,65 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevel) 
MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go new file mode 100644 index 00000000000..9c6a4b3cbf1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go @@ -0,0 +1,60 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult struct { + DataIntegrityValidationResult *DataIntegrityValidationResult `json:"dataIntegrityValidationResult,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + QueryAnalysisValidationResult *QueryAnalysisValidationResult `json:"queryAnalysisValidationResult,omitempty"` + SchemaValidationResult *SchemaComparisonValidationResult `json:"schemaValidationResult,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationDatabaseLevelValidationOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputerror.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputerror.go new file mode 100644 index 00000000000..fd85e5ab407 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputerror.go @@ -0,0 +1,52 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputError{} + +type MigrateSqlServerSqlDbTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputError) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputError{} + +func (s MigrateSqlServerSqlDbTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + 
return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputmigrationlevel.go new file mode 100644 index 00000000000..bc36474d7b3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + MigrationValidationResult *MigrationValidationResult `json:"migrationValidationResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string 
`json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputtablelevel.go new file mode 100644 index 00000000000..450ab2e4aff --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputtablelevel.go @@ -0,0 +1,60 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputvalidationresult.go 
b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputvalidationresult.go new file mode 100644 index 00000000000..457a99f022e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskoutputvalidationresult.go @@ -0,0 +1,54 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputValidationResult struct { + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationValidationOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, 
fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskproperties.go new file mode 100644 index 00000000000..05f2e2c6cef --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqldbtaskproperties.go @@ -0,0 +1,130 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlDbTaskProperties{} + +type MigrateSqlServerSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlDbTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskProperties{} + +func (s MigrateSqlServerSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling 
MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbTaskProperties{} + +func (s *MigrateSqlServerSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return 
fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmidatabaseinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmidatabaseinput.go new file mode 100644 index 00000000000..e639963cb42 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmidatabaseinput.go @@ -0,0 +1,12 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMIDatabaseInput struct { + BackupFilePaths *[]string `json:"backupFilePaths,omitempty"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + Id *string `json:"id,omitempty"` + Name string `json:"name"` + RestoreDatabaseName string `json:"restoreDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..35829aeac16 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskinput.go @@ -0,0 +1,14 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + NumberOfParallelDatabaseMigrations *float64 `json:"numberOfParallelDatabaseMigrations,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..662ae804b14 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskoutput.go @@ -0,0 +1,92 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskOutput interface { + MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = BaseMigrateSqlServerSqlMISyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlMISyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = RawMigrateSqlServerSqlMISyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlMISyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlMISyncTaskOutputImpl struct { + migrateSqlServerSqlMISyncTaskOutput BaseMigrateSqlServerSqlMISyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s.migrateSqlServerSqlMISyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlMISyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMISyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMISyncTaskOutputImpl: %+v", err) + } + + return 
RawMigrateSqlServerSqlMISyncTaskOutputImpl{ + migrateSqlServerSqlMISyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..99fa121b786 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go @@ -0,0 +1,62 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel struct { + ActiveBackupSets *[]BackupSetInfo `json:"activeBackupSets,omitempty"` + ContainerName *string `json:"containerName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FullBackupSetInfo *BackupSetInfo `json:"fullBackupSetInfo,omitempty"` + IsFullBackupRestored *bool `json:"isFullBackupRestored,omitempty"` + LastRestoredBackupSetInfo *BackupSetInfo `json:"lastRestoredBackupSetInfo,omitempty"` + MigrationState *DatabaseMigrationState `json:"migrationState,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return 
BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskoutputerror.go new file mode 100644 index 00000000000..82c70ad4cb3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskoutputerror.go @@ -0,0 +1,52 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputError{} + +type MigrateSqlServerSqlMISyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputError{} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..dfd5409d0f6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go @@ -0,0 +1,62 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + DatabaseErrorCount *int64 `json:"databaseErrorCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerName *string `json:"sourceServerName,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerName *string `json:"targetServerName,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..425b269e2ea --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmisynctaskproperties.go @@ -0,0 +1,124 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlMISyncTaskProperties{} + +type MigrateSqlServerSqlMISyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskProperties{} + +func (s MigrateSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMISyncTaskProperties{} + +func (s *MigrateSqlServerSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 
'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMISyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskinput.go new file mode 100644 index 00000000000..f6469059e5c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskinput.go @@ -0,0 +1,18 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMITaskInput struct { + AadDomainName *string `json:"aadDomainName,omitempty"` + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedAgentJobs *[]string `json:"selectedAgentJobs,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..31b3413894d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutput.go @@ -0,0 +1,108 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMITaskOutput interface { + MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl +} + +var _ MigrateSqlServerSqlMITaskOutput = BaseMigrateSqlServerSqlMITaskOutputImpl{} + +type BaseMigrateSqlServerSqlMITaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMITaskOutput = RawMigrateSqlServerSqlMITaskOutputImpl{} + +// RawMigrateSqlServerSqlMITaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlMITaskOutputImpl struct { + migrateSqlServerSqlMITaskOutput BaseMigrateSqlServerSqlMITaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s.migrateSqlServerSqlMITaskOutput +} + +func UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(input []byte) (MigrateSqlServerSqlMITaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMITaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMITaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMITaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlMITaskOutputImpl{ + migrateSqlServerSqlMITaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputagentjoblevel.go new file mode 100644 index 00000000000..bf71184a7d0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +type MigrateSqlServerSqlMITaskOutputAgentJobLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + Message *string `json:"message,omitempty"` + Name *string `json:"name,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputdatabaselevel.go new file mode 100644 index 00000000000..bd2eb517dd6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputdatabaselevel.go @@ -0,0 +1,59 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMITaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling 
MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputerror.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputerror.go new file mode 100644 index 00000000000..1b8e358009a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputerror.go @@ -0,0 +1,52 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputError{} + +type MigrateSqlServerSqlMITaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputError) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputError{} + +func (s MigrateSqlServerSqlMITaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputError: %+v", 
err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputloginlevel.go new file mode 100644 index 00000000000..f37e84ab5c2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputloginlevel.go @@ -0,0 +1,58 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +type MigrateSqlServerSqlMITaskOutputLoginLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LoginName *string `json:"loginName,omitempty"` + Message *string `json:"message,omitempty"` + Stage *LoginMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputLoginLevel + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputmigrationlevel.go new file mode 100644 index 00000000000..40bd359b70a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMITaskOutputMigrationLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + Message *string `json:"message,omitempty"` + OrphanedUsersInfo *[]OrphanedUserInfo `json:"orphanedUsersInfo,omitempty"` + ServerRoleResults *map[string]StartMigrationScenarioServerRoleResult `json:"serverRoleResults,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); 
err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..0ab29f51a9f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesqlserversqlmitaskproperties.go @@ -0,0 +1,133 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlMITaskProperties{} + +type MigrateSqlServerSqlMITaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlMITaskOutput `json:"output,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler 
= MigrateSqlServerSqlMITaskProperties{} + +func (s MigrateSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMITaskProperties{} + +func (s *MigrateSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.ParentTaskId = decoded.ParentTaskId + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties into map[string]json.RawMessage: 
%+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMITaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratessistaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratessistaskinput.go new file mode 100644 index 00000000000..d5c8079a0cf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratessistaskinput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSsisTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + SsisMigrationInfo SsisMigrationInfo `json:"ssisMigrationInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratessistaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_migratessistaskoutput.go new file mode 100644 index 00000000000..8be79def961 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratessistaskoutput.go @@ -0,0 +1,84 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSsisTaskOutput interface { + MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl +} + +var _ MigrateSsisTaskOutput = BaseMigrateSsisTaskOutputImpl{} + +type BaseMigrateSsisTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return s +} + +var _ MigrateSsisTaskOutput = RawMigrateSsisTaskOutputImpl{} + +// RawMigrateSsisTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSsisTaskOutputImpl struct { + migrateSsisTaskOutput BaseMigrateSsisTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return s.migrateSsisTaskOutput +} + +func UnmarshalMigrateSsisTaskOutputImplementation(input []byte) (MigrateSsisTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSsisTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SsisProjectLevelOutput") { + var out MigrateSsisTaskOutputProjectLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputProjectLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSsisTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSsisTaskOutputImpl: %+v", err) + } + + return RawMigrateSsisTaskOutputImpl{ + migrateSsisTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratessistaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratessistaskoutputmigrationlevel.go new file mode 100644 index 00000000000..ca526cd649e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratessistaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package get + +import ( + "encoding/json" + "fmt" 
+) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputMigrationLevel{} + +type MigrateSsisTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputMigrationLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputMigrationLevel{} + +func (s MigrateSsisTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + 
return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratessistaskoutputprojectlevel.go b/resource-manager/datamigration/2025-06-30/get/model_migratessistaskoutputprojectlevel.go new file mode 100644 index 00000000000..1b0fc08442d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratessistaskoutputprojectlevel.go @@ -0,0 +1,59 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputProjectLevel{} + +type MigrateSsisTaskOutputProjectLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FolderName *string `json:"folderName,omitempty"` + Message *string `json:"message,omitempty"` + ProjectName *string `json:"projectName,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputProjectLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputProjectLevel{} + +func (s MigrateSsisTaskOutputProjectLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputProjectLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling 
MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + decoded["resultType"] = "SsisProjectLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratessistaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_migratessistaskproperties.go new file mode 100644 index 00000000000..a6f356acaae --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratessistaskproperties.go @@ -0,0 +1,121 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSsisTaskProperties{} + +type MigrateSsisTaskProperties struct { + Input *MigrateSsisTaskInput `json:"input,omitempty"` + Output *[]MigrateSsisTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSsisTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSsisTaskProperties{} + +func (s MigrateSsisTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Ssis" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSsisTaskProperties{} + +func (s *MigrateSsisTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateSsisTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSsisTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := 
make([]MigrateSsisTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSsisTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesynccompletecommandinput.go b/resource-manager/datamigration/2025-06-30/get/model_migratesynccompletecommandinput.go new file mode 100644 index 00000000000..166810ce0e7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesynccompletecommandinput.go @@ -0,0 +1,27 @@ +package get + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSyncCompleteCommandInput struct { + CommitTimeStamp *string `json:"commitTimeStamp,omitempty"` + DatabaseName string `json:"databaseName"` +} + +func (o *MigrateSyncCompleteCommandInput) GetCommitTimeStampAsTime() (*time.Time, error) { + if o.CommitTimeStamp == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CommitTimeStamp, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateSyncCompleteCommandInput) SetCommitTimeStampAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CommitTimeStamp = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/get/model_migratesynccompletecommandoutput.go new file mode 100644 index 00000000000..4439f70e1ba --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesynccompletecommandoutput.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` + Id *string `json:"id,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migratesynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/get/model_migratesynccompletecommandproperties.go new file mode 100644 index 00000000000..735f0cd58b6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migratesynccompletecommandproperties.go @@ -0,0 +1,56 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ CommandProperties = MigrateSyncCompleteCommandProperties{} + +type MigrateSyncCompleteCommandProperties struct { + CommandId *string `json:"commandId,omitempty"` + Input *MigrateSyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateSyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateSyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateSyncCompleteCommandProperties{} + +func (s MigrateSyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); 
err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.Sync.Complete.Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migrationeligibilityinfo.go b/resource-manager/datamigration/2025-06-30/get/model_migrationeligibilityinfo.go new file mode 100644 index 00000000000..713478d9be8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migrationeligibilityinfo.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationEligibilityInfo struct { + IsEligibleForMigration *bool `json:"isEligibleForMigration,omitempty"` + ValidationMessages *[]string `json:"validationMessages,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migrationreportresult.go b/resource-manager/datamigration/2025-06-30/get/model_migrationreportresult.go new file mode 100644 index 00000000000..d32635884d6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migrationreportresult.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationReportResult struct { + Id *string `json:"id,omitempty"` + ReportURL *string `json:"reportUrl,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migrationvalidationdatabasesummaryresult.go b/resource-manager/datamigration/2025-06-30/get/model_migrationvalidationdatabasesummaryresult.go new file mode 100644 index 00000000000..fff75667d6c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migrationvalidationdatabasesummaryresult.go @@ -0,0 +1,44 @@ +package get + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationValidationDatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} + +func (o *MigrationValidationDatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *MigrationValidationDatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git 
a/resource-manager/datamigration/2025-06-30/get/model_migrationvalidationoptions.go b/resource-manager/datamigration/2025-06-30/get/model_migrationvalidationoptions.go new file mode 100644 index 00000000000..cc5ee41ace8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migrationvalidationoptions.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationValidationOptions struct { + EnableDataIntegrityValidation *bool `json:"enableDataIntegrityValidation,omitempty"` + EnableQueryAnalysisValidation *bool `json:"enableQueryAnalysisValidation,omitempty"` + EnableSchemaValidation *bool `json:"enableSchemaValidation,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_migrationvalidationresult.go b/resource-manager/datamigration/2025-06-30/get/model_migrationvalidationresult.go new file mode 100644 index 00000000000..029880e610f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_migrationvalidationresult.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationValidationResult struct { + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_misqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/get/model_misqlconnectioninfo.go new file mode 100644 index 00000000000..967feb30344 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_misqlconnectioninfo.go @@ -0,0 +1,54 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MiSqlConnectionInfo{} + +type MiSqlConnectionInfo struct { + ManagedInstanceResourceId string `json:"managedInstanceResourceId"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MiSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MiSqlConnectionInfo{} + +func (s MiSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MiSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MiSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MiSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MiSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MiSqlConnectionInfo: %+v", err) + } + + 
return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbclusterinfo.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbclusterinfo.go new file mode 100644 index 00000000000..94cbca8aa8e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbclusterinfo.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbClusterInfo struct { + Databases []MongoDbDatabaseInfo `json:"databases"` + SupportsSharding bool `json:"supportsSharding"` + Type MongoDbClusterType `json:"type"` + Version string `json:"version"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbcollectioninfo.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbcollectioninfo.go new file mode 100644 index 00000000000..b9c35fdcf96 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbcollectioninfo.go @@ -0,0 +1,19 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + DataSize int64 `json:"dataSize"` + DatabaseName string `json:"databaseName"` + DocumentCount int64 `json:"documentCount"` + IsCapped bool `json:"isCapped"` + IsSystemCollection bool `json:"isSystemCollection"` + IsView bool `json:"isView"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + ShardKey *MongoDbShardKeyInfo `json:"shardKey,omitempty"` + SupportsSharding bool `json:"supportsSharding"` + ViewOf *string `json:"viewOf,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbcollectionprogress.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbcollectionprogress.go new file mode 100644 index 00000000000..822da2e8f56 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbcollectionprogress.go @@ -0,0 +1,102 @@ +package get + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbCollectionProgress{} + +type MongoDbCollectionProgress struct { + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbCollectionProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbCollectionProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbCollectionProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) 
SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbCollectionProgress{} + +func (s MongoDbCollectionProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbCollectionProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbCollectionProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbCollectionProgress: %+v", err) + } + + decoded["resultType"] = "Collection" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbCollectionProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbcollectionsettings.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbcollectionsettings.go new file mode 100644 index 00000000000..b601e165605 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbcollectionsettings.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionSettings struct { + CanDelete *bool `json:"canDelete,omitempty"` + ShardKey *MongoDbShardKeySetting `json:"shardKey,omitempty"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbconnectioninfo.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbconnectioninfo.go new file mode 100644 index 00000000000..04df0597abd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbconnectioninfo.go @@ -0,0 +1,64 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MongoDbConnectionInfo{} + +type MongoDbConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + ConnectionString string `json:"connectionString"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + EnforceSSL *bool `json:"enforceSSL,omitempty"` + Port *int64 `json:"port,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MongoDbConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MongoDbConnectionInfo{} + +func (s MongoDbConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MongoDbConnectionInfo + wrapped := 
wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbConnectionInfo: %+v", err) + } + + decoded["type"] = "mongoDbConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbdatabaseinfo.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbdatabaseinfo.go new file mode 100644 index 00000000000..8c7429fb8ca --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbdatabaseinfo.go @@ -0,0 +1,14 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbDatabaseInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + Collections []MongoDbCollectionInfo `json:"collections"` + DataSize int64 `json:"dataSize"` + DocumentCount int64 `json:"documentCount"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + SupportsSharding bool `json:"supportsSharding"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbdatabaseprogress.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbdatabaseprogress.go new file mode 100644 index 00000000000..a2dd1e538d8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbdatabaseprogress.go @@ -0,0 +1,166 @@ +package get + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ MongoDbProgress = MongoDbDatabaseProgress{} + +type MongoDbDatabaseProgress struct { + Collections *map[string]MongoDbProgress `json:"collections,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbDatabaseProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbDatabaseProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbDatabaseProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + 
return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbDatabaseProgress{} + +func (s MongoDbDatabaseProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbDatabaseProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbDatabaseProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbDatabaseProgress: %+v", err) + } + + decoded["resultType"] = "Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbDatabaseProgress: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MongoDbDatabaseProgress{} + +func (s *MongoDbDatabaseProgress) UnmarshalJSON(bytes []byte) error { + var decoded struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.BytesCopied = decoded.BytesCopied + s.DocumentsCopied = decoded.DocumentsCopied + s.ElapsedTime = decoded.ElapsedTime + 
s.Errors = decoded.Errors + s.EventsPending = decoded.EventsPending + s.EventsReplayed = decoded.EventsReplayed + s.LastEventTime = decoded.LastEventTime + s.LastReplayTime = decoded.LastReplayTime + s.Name = decoded.Name + s.QualifiedName = decoded.QualifiedName + s.ResultType = decoded.ResultType + s.State = decoded.State + s.TotalBytes = decoded.TotalBytes + s.TotalDocuments = decoded.TotalDocuments + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MongoDbDatabaseProgress into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["collections"]; ok { + var dictionaryTemp map[string]json.RawMessage + if err := json.Unmarshal(v, &dictionaryTemp); err != nil { + return fmt.Errorf("unmarshaling Collections into dictionary map[string]json.RawMessage: %+v", err) + } + + output := make(map[string]MongoDbProgress) + for key, val := range dictionaryTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling key %q field 'Collections' for 'MongoDbDatabaseProgress': %+v", key, err) + } + output[key] = impl + } + s.Collections = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbdatabasesettings.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbdatabasesettings.go new file mode 100644 index 00000000000..105d54ea31a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbdatabasesettings.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbDatabaseSettings struct { + Collections map[string]MongoDbCollectionSettings `json:"collections"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodberror.go b/resource-manager/datamigration/2025-06-30/get/model_mongodberror.go new file mode 100644 index 00000000000..b20d9656133 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodberror.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbError struct { + Code *string `json:"code,omitempty"` + Count *int64 `json:"count,omitempty"` + Message *string `json:"message,omitempty"` + Type *MongoDbErrorType `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbmigrationprogress.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbmigrationprogress.go new file mode 100644 index 00000000000..daa41f8144f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbmigrationprogress.go @@ -0,0 +1,103 @@ +package get + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbMigrationProgress{} + +type MongoDbMigrationProgress struct { + Databases *map[string]MongoDbDatabaseProgress `json:"databases,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbMigrationProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbMigrationProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbMigrationProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbMigrationProgress{} + +func (s MongoDbMigrationProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbMigrationProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbMigrationProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbMigrationProgress: %+v", err) + } + + decoded["resultType"] = "Migration" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbMigrationProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbmigrationsettings.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbmigrationsettings.go new file mode 100644 index 00000000000..45425b1f8a7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbmigrationsettings.go @@ -0,0 +1,13 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbMigrationSettings struct { + BoostRUs *int64 `json:"boostRUs,omitempty"` + Databases map[string]MongoDbDatabaseSettings `json:"databases"` + Replication *MongoDbReplication `json:"replication,omitempty"` + Source MongoDbConnectionInfo `json:"source"` + Target MongoDbConnectionInfo `json:"target"` + Throttling *MongoDbThrottlingSettings `json:"throttling,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbprogress.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbprogress.go new file mode 100644 index 00000000000..47da0cc7646 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbprogress.go @@ -0,0 +1,104 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbProgress interface { + MongoDbProgress() BaseMongoDbProgressImpl +} + +var _ MongoDbProgress = BaseMongoDbProgressImpl{} + +type BaseMongoDbProgressImpl struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s BaseMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s +} + +var _ MongoDbProgress = RawMongoDbProgressImpl{} + +// RawMongoDbProgressImpl is returned when the Discriminated Value doesn't match 
any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMongoDbProgressImpl struct { + mongoDbProgress BaseMongoDbProgressImpl + Type string + Values map[string]interface{} +} + +func (s RawMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s.mongoDbProgress +} + +func UnmarshalMongoDbProgressImplementation(input []byte) (MongoDbProgress, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbProgress into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Collection") { + var out MongoDbCollectionProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbCollectionProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Database") { + var out MongoDbDatabaseProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbDatabaseProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migration") { + var out MongoDbMigrationProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbMigrationProgress: %+v", err) + } + return out, nil + } + + var parent BaseMongoDbProgressImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMongoDbProgressImpl: %+v", err) + } + + return RawMongoDbProgressImpl{ + mongoDbProgress: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbshardkeyfield.go 
b/resource-manager/datamigration/2025-06-30/get/model_mongodbshardkeyfield.go new file mode 100644 index 00000000000..35831b82363 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbshardkeyfield.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbShardKeyField struct { + Name string `json:"name"` + Order MongoDbShardKeyOrder `json:"order"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbshardkeyinfo.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbshardkeyinfo.go new file mode 100644 index 00000000000..f9b93ab1e76 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbshardkeyinfo.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbShardKeyInfo struct { + Fields []MongoDbShardKeyField `json:"fields"` + IsUnique bool `json:"isUnique"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbshardkeysetting.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbshardkeysetting.go new file mode 100644 index 00000000000..b00590ab8b1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbshardkeysetting.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbShardKeySetting struct { + Fields []MongoDbShardKeyField `json:"fields"` + IsUnique *bool `json:"isUnique,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mongodbthrottlingsettings.go b/resource-manager/datamigration/2025-06-30/get/model_mongodbthrottlingsettings.go new file mode 100644 index 00000000000..46197ed83cf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mongodbthrottlingsettings.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbThrottlingSettings struct { + MaxParallelism *int64 `json:"maxParallelism,omitempty"` + MinFreeCPU *int64 `json:"minFreeCpu,omitempty"` + MinFreeMemoryMb *int64 `json:"minFreeMemoryMb,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_mysqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/get/model_mysqlconnectioninfo.go new file mode 100644 index 00000000000..09cf0693946 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_mysqlconnectioninfo.go @@ -0,0 +1,59 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = MySqlConnectionInfo{} + +type MySqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Port int64 `json:"port"` + ServerName string `json:"serverName"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MySqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MySqlConnectionInfo{} + +func (s MySqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MySqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MySqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MySqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MySqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MySqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_odataerror.go b/resource-manager/datamigration/2025-06-30/get/model_odataerror.go new file mode 100644 index 00000000000..95428145081 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_odataerror.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ODataError struct { + Code *string `json:"code,omitempty"` + Details *[]ODataError `json:"details,omitempty"` + Message *string `json:"message,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_oracleconnectioninfo.go b/resource-manager/datamigration/2025-06-30/get/model_oracleconnectioninfo.go new file mode 100644 index 00000000000..3fccad4de83 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_oracleconnectioninfo.go @@ -0,0 +1,58 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = OracleConnectionInfo{} + +type OracleConnectionInfo struct { + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource string `json:"dataSource"` + Port *int64 `json:"port,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s OracleConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = OracleConnectionInfo{} + +func (s OracleConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper OracleConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling OracleConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling OracleConnectionInfo: %+v", err) + } + + decoded["type"] = "OracleConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return 
nil, fmt.Errorf("re-marshaling OracleConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_orphaneduserinfo.go b/resource-manager/datamigration/2025-06-30/get/model_orphaneduserinfo.go new file mode 100644 index 00000000000..20cac083d19 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_orphaneduserinfo.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type OrphanedUserInfo struct { + DatabaseName *string `json:"databaseName,omitempty"` + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_postgresqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/get/model_postgresqlconnectioninfo.go new file mode 100644 index 00000000000..44e799d49db --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_postgresqlconnectioninfo.go @@ -0,0 +1,63 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = PostgreSqlConnectionInfo{} + +type PostgreSqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Port int64 `json:"port"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName string `json:"serverName"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s PostgreSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = PostgreSqlConnectionInfo{} + +func (s PostgreSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper PostgreSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling PostgreSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling PostgreSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "PostgreSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling PostgreSqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_project.go b/resource-manager/datamigration/2025-06-30/get/model_project.go new file mode 100644 index 00000000000..ac5c7dab02a --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/get/model_project.go @@ -0,0 +1,19 @@ +package get + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type Project struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *ProjectProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_projectfile.go b/resource-manager/datamigration/2025-06-30/get/model_projectfile.go new file mode 100644 index 00000000000..63271259311 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_projectfile.go @@ -0,0 +1,17 @@ +package get + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ProjectFile struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *ProjectFileProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_projectfileproperties.go b/resource-manager/datamigration/2025-06-30/get/model_projectfileproperties.go new file mode 100644 index 00000000000..4d3463ecd68 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_projectfileproperties.go @@ -0,0 +1,30 @@ +package get + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectFileProperties struct { + Extension *string `json:"extension,omitempty"` + FilePath *string `json:"filePath,omitempty"` + LastModified *string `json:"lastModified,omitempty"` + MediaType *string `json:"mediaType,omitempty"` + Size *int64 `json:"size,omitempty"` +} + +func (o *ProjectFileProperties) GetLastModifiedAsTime() (*time.Time, error) { + if o.LastModified == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastModified, "2006-01-02T15:04:05Z07:00") +} + +func (o *ProjectFileProperties) SetLastModifiedAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModified = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_projectproperties.go b/resource-manager/datamigration/2025-06-30/get/model_projectproperties.go new file mode 100644 index 00000000000..fd923548059 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_projectproperties.go @@ -0,0 +1,81 @@ +package get + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) 
+ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectProperties struct { + AzureAuthenticationInfo *AzureActiveDirectoryApp `json:"azureAuthenticationInfo,omitempty"` + CreationTime *string `json:"creationTime,omitempty"` + DatabasesInfo *[]DatabaseInfo `json:"databasesInfo,omitempty"` + ProvisioningState *ProjectProvisioningState `json:"provisioningState,omitempty"` + SourceConnectionInfo ConnectionInfo `json:"sourceConnectionInfo"` + SourcePlatform ProjectSourcePlatform `json:"sourcePlatform"` + TargetConnectionInfo ConnectionInfo `json:"targetConnectionInfo"` + TargetPlatform ProjectTargetPlatform `json:"targetPlatform"` +} + +func (o *ProjectProperties) GetCreationTimeAsTime() (*time.Time, error) { + if o.CreationTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CreationTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *ProjectProperties) SetCreationTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CreationTime = &formatted +} + +var _ json.Unmarshaler = &ProjectProperties{} + +func (s *ProjectProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + AzureAuthenticationInfo *AzureActiveDirectoryApp `json:"azureAuthenticationInfo,omitempty"` + CreationTime *string `json:"creationTime,omitempty"` + DatabasesInfo *[]DatabaseInfo `json:"databasesInfo,omitempty"` + ProvisioningState *ProjectProvisioningState `json:"provisioningState,omitempty"` + SourcePlatform ProjectSourcePlatform `json:"sourcePlatform"` + TargetPlatform ProjectTargetPlatform `json:"targetPlatform"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.AzureAuthenticationInfo = decoded.AzureAuthenticationInfo + s.CreationTime = decoded.CreationTime + s.DatabasesInfo = decoded.DatabasesInfo + s.ProvisioningState = decoded.ProvisioningState + 
s.SourcePlatform = decoded.SourcePlatform + s.TargetPlatform = decoded.TargetPlatform + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ProjectProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["sourceConnectionInfo"]; ok { + impl, err := UnmarshalConnectionInfoImplementation(v) + if err != nil { + return fmt.Errorf("unmarshaling field 'SourceConnectionInfo' for 'ProjectProperties': %+v", err) + } + s.SourceConnectionInfo = impl + } + + if v, ok := temp["targetConnectionInfo"]; ok { + impl, err := UnmarshalConnectionInfoImplementation(v) + if err != nil { + return fmt.Errorf("unmarshaling field 'TargetConnectionInfo' for 'ProjectProperties': %+v", err) + } + s.TargetConnectionInfo = impl + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_projecttask.go b/resource-manager/datamigration/2025-06-30/get/model_projecttask.go new file mode 100644 index 00000000000..ce137fa8d6a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_projecttask.go @@ -0,0 +1,56 @@ +package get + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ProjectTask struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties ProjectTaskProperties `json:"properties"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} + +var _ json.Unmarshaler = &ProjectTask{} + +func (s *ProjectTask) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Etag = decoded.Etag + s.Id = decoded.Id + s.Name = decoded.Name + s.SystemData = decoded.SystemData + s.Type = decoded.Type + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ProjectTask into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["properties"]; ok { + impl, err := UnmarshalProjectTaskPropertiesImplementation(v) + if err != nil { + return fmt.Errorf("unmarshaling field 'Properties' for 'ProjectTask': %+v", err) + } + s.Properties = impl + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_projecttaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_projecttaskproperties.go new file mode 100644 index 00000000000..072f6b8a13f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_projecttaskproperties.go @@ -0,0 +1,386 @@ +package get + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// ProjectTaskProperties is the discriminated base type for a DMS task's
// properties; the JSON "taskType" field selects the concrete implementation.
type ProjectTaskProperties interface {
	ProjectTaskProperties() BaseProjectTaskPropertiesImpl
}

var _ ProjectTaskProperties = BaseProjectTaskPropertiesImpl{}

// BaseProjectTaskPropertiesImpl holds the fields shared by every task type.
type BaseProjectTaskPropertiesImpl struct {
	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

func (s BaseProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return s
}

var _ ProjectTaskProperties = RawProjectTaskPropertiesImpl{}

// RawProjectTaskPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawProjectTaskPropertiesImpl struct {
	projectTaskProperties BaseProjectTaskPropertiesImpl
	Type                  string
	Values                map[string]interface{}
}

func (s RawProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return s.projectTaskProperties
}

var _ json.Unmarshaler = &BaseProjectTaskPropertiesImpl{}

// UnmarshalJSON decodes the plainly-typed fields directly, then resolves the
// "commands" list element-by-element because CommandProperties is itself a
// discriminated type.
func (s *BaseProjectTaskPropertiesImpl) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		ClientData *map[string]string `json:"clientData,omitempty"`
		Errors     *[]ODataError      `json:"errors,omitempty"`
		State      *TaskState         `json:"state,omitempty"`
		TaskType   TaskType           `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling BaseProjectTaskPropertiesImpl into map[string]json.RawMessage: %+v", err)
	}

	// Each element of "commands" carries its own discriminator, so run every
	// element through the CommandProperties resolver.
	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'BaseProjectTaskPropertiesImpl': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}

// UnmarshalProjectTaskPropertiesImplementation maps the "taskType"
// discriminator (matched case-insensitively) to the concrete task-properties
// type; unrecognised values fall back to RawProjectTaskPropertiesImpl.
func UnmarshalProjectTaskPropertiesImplementation(input []byte) (ProjectTaskProperties, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling ProjectTaskProperties into map[string]interface: %+v", err)
	}

	var value string
	if v, ok := temp["taskType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "Connect.MongoDb") {
		var out ConnectToMongoDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToMongoDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.MySql") {
		var out ConnectToSourceMySqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceMySqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.Oracle.Sync") {
		var out ConnectToSourceOracleSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceOracleSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.PostgreSql.Sync") {
		var out ConnectToSourcePostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.SqlServer.Sync") {
		var out ConnectToSourceSqlServerSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.SqlServer") {
		var out ConnectToSourceSqlServerTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureDbForMySql") {
		var out ConnectToTargetAzureDbForMySqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureDbForPostgreSql.Sync") {
		var out ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync") {
		var out ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.SqlDb") {
		var out ConnectToTargetSqlDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI.Sync.LRS") {
		var out ConnectToTargetSqlMISyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMISyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI") {
		var out ConnectToTargetSqlMITaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMITaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.SqlDb.Sync") {
		var out ConnectToTargetSqlSqlDbSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetTDECertificates.Sql") {
		var out GetTdeCertificatesSqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetTdeCertificatesSqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTablesMySql") {
		var out GetUserTablesMySqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesMySqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTablesOracle") {
		var out GetUserTablesOracleTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesOracleTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTablesPostgreSql") {
		var out GetUserTablesPostgreSqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesPostgreSqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTables.AzureSqlDb.Sync") {
		var out GetUserTablesSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTables.Sql") {
		var out GetUserTablesSqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.MongoDb") {
		var out MigrateMongoDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMongoDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql") {
		var out MigrateMySqlAzureDbForMySqlOfflineTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql.Sync") {
		var out MigrateMySqlAzureDbForMySqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.Oracle.AzureDbForPostgreSql.Sync") {
		var out MigrateOracleAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2") {
		var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDb.Sync") {
		var out MigrateSqlServerSqlDbSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.SqlDb") {
		var out MigrateSqlServerSqlDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS") {
		var out MigrateSqlServerSqlMISyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI") {
		var out MigrateSqlServerSqlMITaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.Ssis") {
		var out MigrateSsisTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.SqlDb.Sync") {
		var out ValidateMigrationInputSqlServerSqlDbSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS") {
		var out ValidateMigrationInputSqlServerSqlMISyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI") {
		var out ValidateMigrationInputSqlServerSqlMITaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Validate.MongoDb") {
		var out ValidateMongoDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMongoDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Validate.Oracle.AzureDbPostgreSql.Sync") {
		var out ValidateOracleAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	// No discriminator matched: preserve the raw payload in the fallback type.
	var parent BaseProjectTaskPropertiesImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseProjectTaskPropertiesImpl: %+v", err)
	}

	return RawProjectTaskPropertiesImpl{
		projectTaskProperties: parent,
		Type:                  value,
		Values:                temp,
	}, nil

}
See NOTICE.txt in the project root for license information. + +type QueryAnalysisValidationResult struct { + QueryResults *QueryExecutionResult `json:"queryResults,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_queryexecutionresult.go b/resource-manager/datamigration/2025-06-30/get/model_queryexecutionresult.go new file mode 100644 index 00000000000..7d508040231 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_queryexecutionresult.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QueryExecutionResult struct { + QueryText *string `json:"queryText,omitempty"` + SourceResult *ExecutionStatistics `json:"sourceResult,omitempty"` + StatementsInBatch *int64 `json:"statementsInBatch,omitempty"` + TargetResult *ExecutionStatistics `json:"targetResult,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_quota.go b/resource-manager/datamigration/2025-06-30/get/model_quota.go new file mode 100644 index 00000000000..202852c0826 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_quota.go @@ -0,0 +1,12 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type Quota struct { + CurrentValue *float64 `json:"currentValue,omitempty"` + Id *string `json:"id,omitempty"` + Limit *float64 `json:"limit,omitempty"` + Name *QuotaName `json:"name,omitempty"` + Unit *string `json:"unit,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_quotaname.go b/resource-manager/datamigration/2025-06-30/get/model_quotaname.go new file mode 100644 index 00000000000..8d8eb3b5c8a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_quotaname.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QuotaName struct { + LocalizedValue *string `json:"localizedValue,omitempty"` + Value *string `json:"value,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_reportableexception.go b/resource-manager/datamigration/2025-06-30/get/model_reportableexception.go new file mode 100644 index 00000000000..9897733972f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_reportableexception.go @@ -0,0 +1,13 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ReportableException struct { + ActionableMessage *string `json:"actionableMessage,omitempty"` + FilePath *string `json:"filePath,omitempty"` + HResult *int64 `json:"hResult,omitempty"` + LineNumber *string `json:"lineNumber,omitempty"` + Message *string `json:"message,omitempty"` + StackTrace *string `json:"stackTrace,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_resourcesku.go b/resource-manager/datamigration/2025-06-30/get/model_resourcesku.go new file mode 100644 index 00000000000..31ecac69005 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_resourcesku.go @@ -0,0 +1,19 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResourceSku struct { + ApiVersions *[]string `json:"apiVersions,omitempty"` + Capabilities *[]ResourceSkuCapabilities `json:"capabilities,omitempty"` + Capacity *ResourceSkuCapacity `json:"capacity,omitempty"` + Costs *[]ResourceSkuCosts `json:"costs,omitempty"` + Family *string `json:"family,omitempty"` + Kind *string `json:"kind,omitempty"` + Locations *[]string `json:"locations,omitempty"` + Name *string `json:"name,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` + Restrictions *[]ResourceSkuRestrictions `json:"restrictions,omitempty"` + Size *string `json:"size,omitempty"` + Tier *string `json:"tier,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_resourceskucapabilities.go b/resource-manager/datamigration/2025-06-30/get/model_resourceskucapabilities.go new file mode 100644 index 00000000000..99ea1e05a86 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_resourceskucapabilities.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ResourceSkuCapabilities struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_resourceskucapacity.go b/resource-manager/datamigration/2025-06-30/get/model_resourceskucapacity.go new file mode 100644 index 00000000000..46cfbe74c7b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_resourceskucapacity.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResourceSkuCapacity struct { + Default *int64 `json:"default,omitempty"` + Maximum *int64 `json:"maximum,omitempty"` + Minimum *int64 `json:"minimum,omitempty"` + ScaleType *ResourceSkuCapacityScaleType `json:"scaleType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_resourceskucosts.go b/resource-manager/datamigration/2025-06-30/get/model_resourceskucosts.go new file mode 100644 index 00000000000..ebe35375798 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_resourceskucosts.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResourceSkuCosts struct { + ExtendedUnit *string `json:"extendedUnit,omitempty"` + MeterID *string `json:"meterID,omitempty"` + Quantity *int64 `json:"quantity,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_resourceskurestrictions.go b/resource-manager/datamigration/2025-06-30/get/model_resourceskurestrictions.go new file mode 100644 index 00000000000..fd1e9ac4189 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_resourceskurestrictions.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ResourceSkuRestrictions struct { + ReasonCode *ResourceSkuRestrictionsReasonCode `json:"reasonCode,omitempty"` + Type *ResourceSkuRestrictionsType `json:"type,omitempty"` + Values *[]string `json:"values,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_schemacomparisonvalidationresult.go b/resource-manager/datamigration/2025-06-30/get/model_schemacomparisonvalidationresult.go new file mode 100644 index 00000000000..450e14e555c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_schemacomparisonvalidationresult.go @@ -0,0 +1,11 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SchemaComparisonValidationResult struct { + SchemaDifferences *SchemaComparisonValidationResultType `json:"schemaDifferences,omitempty"` + SourceDatabaseObjectCount *map[string]int64 `json:"sourceDatabaseObjectCount,omitempty"` + TargetDatabaseObjectCount *map[string]int64 `json:"targetDatabaseObjectCount,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_schemacomparisonvalidationresulttype.go b/resource-manager/datamigration/2025-06-30/get/model_schemacomparisonvalidationresulttype.go new file mode 100644 index 00000000000..a6eb932d29d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_schemacomparisonvalidationresulttype.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SchemaComparisonValidationResultType struct { + ObjectName *string `json:"objectName,omitempty"` + ObjectType *ObjectType `json:"objectType,omitempty"` + UpdateAction *UpdateActionType `json:"updateAction,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_selectedcertificateinput.go b/resource-manager/datamigration/2025-06-30/get/model_selectedcertificateinput.go new file mode 100644 index 00000000000..e4f03a1ac93 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_selectedcertificateinput.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SelectedCertificateInput struct { + CertificateName string `json:"certificateName"` + Password string `json:"password"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_serverproperties.go b/resource-manager/datamigration/2025-06-30/get/model_serverproperties.go new file mode 100644 index 00000000000..098c5abad12 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_serverproperties.go @@ -0,0 +1,13 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServerProperties struct { + ServerDatabaseCount *int64 `json:"serverDatabaseCount,omitempty"` + ServerEdition *string `json:"serverEdition,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerOperatingSystemVersion *string `json:"serverOperatingSystemVersion,omitempty"` + ServerPlatform *string `json:"serverPlatform,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_servicesku.go b/resource-manager/datamigration/2025-06-30/get/model_servicesku.go new file mode 100644 index 00000000000..a7d0a601bdc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_servicesku.go @@ -0,0 +1,12 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServiceSku struct { + Capacity *int64 `json:"capacity,omitempty"` + Family *string `json:"family,omitempty"` + Name *string `json:"name,omitempty"` + Size *string `json:"size,omitempty"` + Tier *string `json:"tier,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_sqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/get/model_sqlconnectioninfo.go new file mode 100644 index 00000000000..4c1794a1de9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_sqlconnectioninfo.go @@ -0,0 +1,64 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = SqlConnectionInfo{} + +type SqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource string `json:"dataSource"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Platform *SqlSourcePlatform `json:"platform,omitempty"` + Port *int64 `json:"port,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s SqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = SqlConnectionInfo{} + +func (s SqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper SqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling SqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling SqlConnectionInfo: %+v", err) + } + + decoded["type"] = "SqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling SqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_sqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_sqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..b863e4700e8 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/get/model_sqlserversqlmisynctaskinput.go @@ -0,0 +1,13 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_ssismigrationinfo.go b/resource-manager/datamigration/2025-06-30/get/model_ssismigrationinfo.go new file mode 100644 index 00000000000..236005a2f9d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_ssismigrationinfo.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SsisMigrationInfo struct { + EnvironmentOverwriteOption *SsisMigrationOverwriteOption `json:"environmentOverwriteOption,omitempty"` + ProjectOverwriteOption *SsisMigrationOverwriteOption `json:"projectOverwriteOption,omitempty"` + SsisStoreType *SsisStoreType `json:"ssisStoreType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_startmigrationscenarioserverroleresult.go b/resource-manager/datamigration/2025-06-30/get/model_startmigrationscenarioserverroleresult.go new file mode 100644 index 00000000000..5cc7df5b00d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_startmigrationscenarioserverroleresult.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type StartMigrationScenarioServerRoleResult struct { + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Name *string `json:"name,omitempty"` + State *MigrationState `json:"state,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_syncmigrationdatabaseerrorevent.go b/resource-manager/datamigration/2025-06-30/get/model_syncmigrationdatabaseerrorevent.go new file mode 100644 index 00000000000..4dfc9866b8f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_syncmigrationdatabaseerrorevent.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SyncMigrationDatabaseErrorEvent struct { + EventText *string `json:"eventText,omitempty"` + EventTypeString *string `json:"eventTypeString,omitempty"` + TimestampString *string `json:"timestampString,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..47f943e9ebb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlDbSyncTaskProperties struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..681cb6c7b3e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMISyncTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..ea2f3f8638c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmisynctaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlMISyncTaskProperties struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMISyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmitaskinput.go new file mode 100644 index 00000000000..9a1f0533131 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmitaskinput.go @@ -0,0 +1,14 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMITaskInput struct { + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..86db7f747d2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmitaskoutput.go @@ -0,0 +1,15 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateMigrationInputSqlServerSqlMITaskOutput struct { + BackupFolderErrors *[]ReportableException `json:"backupFolderErrors,omitempty"` + BackupShareCredentialsErrors *[]ReportableException `json:"backupShareCredentialsErrors,omitempty"` + BackupStorageAccountErrors *[]ReportableException `json:"backupStorageAccountErrors,omitempty"` + DatabaseBackupInfo *DatabaseBackupInfo `json:"databaseBackupInfo,omitempty"` + ExistingBackupErrors *[]ReportableException `json:"existingBackupErrors,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + RestoreDatabaseNameErrors *[]ReportableException `json:"restoreDatabaseNameErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..0f20f361a61 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_validatemigrationinputsqlserversqlmitaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +type ValidateMigrationInputSqlServerSqlMITaskProperties struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var 
decoded struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_validatemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_validatemongodbtaskproperties.go new file mode 100644 index 00000000000..f766ae513bf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_validatemongodbtaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ValidateMongoDbTaskProperties{} + +type ValidateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMongoDbTaskProperties{} + +func (s ValidateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMongoDbTaskProperties{} + +func (s *ValidateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` 
+ State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_validateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/get/model_validateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..5782f89ad0d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_validateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package get + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ValidateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.Oracle.AzureDbPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { 
+ var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_validateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_validateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..cb3125fe455 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_validateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,8 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateOracleAzureDbPostgreSqlSyncTaskOutput struct { + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_validatesyncmigrationinputsqlservertaskinput.go b/resource-manager/datamigration/2025-06-30/get/model_validatesyncmigrationinputsqlservertaskinput.go new file mode 100644 index 00000000000..22a3f3b4b20 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_validatesyncmigrationinputsqlservertaskinput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateSyncMigrationInputSqlServerTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_validatesyncmigrationinputsqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/get/model_validatesyncmigrationinputsqlservertaskoutput.go new file mode 100644 index 00000000000..4b1dbf58c38 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_validatesyncmigrationinputsqlservertaskoutput.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateSyncMigrationInputSqlServerTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_validationerror.go b/resource-manager/datamigration/2025-06-30/get/model_validationerror.go new file mode 100644 index 00000000000..c3eb5dec554 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_validationerror.go @@ -0,0 +1,9 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidationError struct { + Severity *Severity `json:"severity,omitempty"` + Text *string `json:"text,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/model_waitstatistics.go b/resource-manager/datamigration/2025-06-30/get/model_waitstatistics.go new file mode 100644 index 00000000000..d4f100bf792 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/model_waitstatistics.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type WaitStatistics struct { + WaitCount *int64 `json:"waitCount,omitempty"` + WaitTimeMs *float64 `json:"waitTimeMs,omitempty"` + WaitType *string `json:"waitType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/get/predicates.go b/resource-manager/datamigration/2025-06-30/get/predicates.go new file mode 100644 index 00000000000..b4dbd9be7bb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/predicates.go @@ -0,0 +1,210 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AvailableServiceSkuOperationPredicate struct { + ResourceType *string +} + +func (p AvailableServiceSkuOperationPredicate) Matches(input AvailableServiceSku) bool { + + if p.ResourceType != nil && (input.ResourceType == nil || *p.ResourceType != *input.ResourceType) { + return false + } + + return true +} + +type DataMigrationServiceOperationPredicate struct { + Etag *string + Id *string + Kind *string + Location *string + Name *string + Type *string +} + +func (p DataMigrationServiceOperationPredicate) Matches(input DataMigrationService) bool { + + if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Kind != nil && (input.Kind == nil || *p.Kind != *input.Kind) { + return false + } + + if p.Location != nil && (input.Location == nil || *p.Location != *input.Location) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type ProjectOperationPredicate struct { + Etag *string + Id *string + Location *string + Name *string + Type *string +} + +func (p ProjectOperationPredicate) Matches(input Project) bool { + + if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Location != nil && (input.Location == nil || *p.Location != *input.Location) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type ProjectFileOperationPredicate struct { + Etag *string + Id *string + Name *string + Type *string +} + +func (p ProjectFileOperationPredicate) Matches(input ProjectFile) bool { + + if 
p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type ProjectTaskOperationPredicate struct { + Etag *string + Id *string + Name *string + Type *string +} + +func (p ProjectTaskOperationPredicate) Matches(input ProjectTask) bool { + + if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type QuotaOperationPredicate struct { + CurrentValue *float64 + Id *string + Limit *float64 + Unit *string +} + +func (p QuotaOperationPredicate) Matches(input Quota) bool { + + if p.CurrentValue != nil && (input.CurrentValue == nil || *p.CurrentValue != *input.CurrentValue) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Limit != nil && (input.Limit == nil || *p.Limit != *input.Limit) { + return false + } + + if p.Unit != nil && (input.Unit == nil || *p.Unit != *input.Unit) { + return false + } + + return true +} + +type ResourceSkuOperationPredicate struct { + Family *string + Kind *string + Name *string + ResourceType *string + Size *string + Tier *string +} + +func (p ResourceSkuOperationPredicate) Matches(input ResourceSku) bool { + + if p.Family != nil && (input.Family == nil || *p.Family != *input.Family) { + return false + } + + if p.Kind != nil && (input.Kind == nil || *p.Kind != *input.Kind) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != 
*input.Name) { + return false + } + + if p.ResourceType != nil && (input.ResourceType == nil || *p.ResourceType != *input.ResourceType) { + return false + } + + if p.Size != nil && (input.Size == nil || *p.Size != *input.Size) { + return false + } + + if p.Tier != nil && (input.Tier == nil || *p.Tier != *input.Tier) { + return false + } + + return true +} diff --git a/resource-manager/datamigration/2025-06-30/get/version.go b/resource-manager/datamigration/2025-06-30/get/version.go new file mode 100644 index 00000000000..be1017f1655 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/get/version.go @@ -0,0 +1,10 @@ +package get + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/get/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/README.md b/resource-manager/datamigration/2025-06-30/migrationservices/README.md new file mode 100644 index 00000000000..8313abb9771 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/README.md @@ -0,0 +1,134 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/migrationservices` Documentation + +The `migrationservices` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/migrationservices" +``` + + +### Client Initialization + +```go +client := migrationservices.NewMigrationServicesClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `MigrationServicesClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := migrationservices.NewMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "migrationServiceName") + +payload := migrationservices.MigrationService{ + // ... +} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `MigrationServicesClient.Delete` + +```go +ctx := context.TODO() +id := migrationservices.NewMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "migrationServiceName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `MigrationServicesClient.Get` + +```go +ctx := context.TODO() +id := migrationservices.NewMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "migrationServiceName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `MigrationServicesClient.ListByResourceGroup` + +```go +ctx := context.TODO() +id := commonids.NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + +// alternatively `client.ListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: 
`MigrationServicesClient.ListBySubscription` + +```go +ctx := context.TODO() +id := commonids.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.ListBySubscription(ctx, id)` can be used to do batched pagination +items, err := client.ListBySubscriptionComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `MigrationServicesClient.ListMigrations` + +```go +ctx := context.TODO() +id := migrationservices.NewMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "migrationServiceName") + +// alternatively `client.ListMigrations(ctx, id)` can be used to do batched pagination +items, err := client.ListMigrationsComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `MigrationServicesClient.Update` + +```go +ctx := context.TODO() +id := migrationservices.NewMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "migrationServiceName") + +payload := migrationservices.MigrationServiceUpdate{ + // ... +} + + +if err := client.UpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/client.go b/resource-manager/datamigration/2025-06-30/migrationservices/client.go new file mode 100644 index 00000000000..dd443cd3866 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/client.go @@ -0,0 +1,26 @@ +package migrationservices + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationServicesClient struct { + Client *resourcemanager.Client +} + +func NewMigrationServicesClientWithBaseURI(sdkApi sdkEnv.Api) (*MigrationServicesClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "migrationservices", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating MigrationServicesClient: %+v", err) + } + + return &MigrationServicesClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/constants.go b/resource-manager/datamigration/2025-06-30/migrationservices/constants.go new file mode 100644 index 00000000000..db8a394a478 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/constants.go @@ -0,0 +1,198 @@ +package migrationservices + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AuthType string + +const ( + AuthTypeAccountKey AuthType = "AccountKey" + AuthTypeManagedIdentity AuthType = "ManagedIdentity" +) + +func PossibleValuesForAuthType() []string { + return []string{ + string(AuthTypeAccountKey), + string(AuthTypeManagedIdentity), + } +} + +func (s *AuthType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthType(input string) (*AuthType, error) { + vals := map[string]AuthType{ + "accountkey": AuthTypeAccountKey, + "managedidentity": AuthTypeManagedIdentity, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthType(input) + return &out, nil +} + +type MongoMigrationStatus string + 
+const ( + MongoMigrationStatusCanceled MongoMigrationStatus = "Canceled" + MongoMigrationStatusCompleted MongoMigrationStatus = "Completed" + MongoMigrationStatusFailed MongoMigrationStatus = "Failed" + MongoMigrationStatusInProgress MongoMigrationStatus = "InProgress" + MongoMigrationStatusNotStarted MongoMigrationStatus = "NotStarted" +) + +func PossibleValuesForMongoMigrationStatus() []string { + return []string{ + string(MongoMigrationStatusCanceled), + string(MongoMigrationStatusCompleted), + string(MongoMigrationStatusFailed), + string(MongoMigrationStatusInProgress), + string(MongoMigrationStatusNotStarted), + } +} + +func (s *MongoMigrationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoMigrationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoMigrationStatus(input string) (*MongoMigrationStatus, error) { + vals := map[string]MongoMigrationStatus{ + "canceled": MongoMigrationStatusCanceled, + "completed": MongoMigrationStatusCompleted, + "failed": MongoMigrationStatusFailed, + "inprogress": MongoMigrationStatusInProgress, + "notstarted": MongoMigrationStatusNotStarted, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoMigrationStatus(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateProvisioning ProvisioningState = "Provisioning" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateCanceled), + 
string(ProvisioningStateFailed), + string(ProvisioningStateProvisioning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUpdating), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "canceled": ProvisioningStateCanceled, + "failed": ProvisioningStateFailed, + "provisioning": ProvisioningStateProvisioning, + "succeeded": ProvisioningStateSucceeded, + "updating": ProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningState(input) + return &out, nil +} + +type ResourceType string + +const ( + ResourceTypeMongoToCosmosDbMongo ResourceType = "MongoToCosmosDbMongo" + ResourceTypeSqlDb ResourceType = "SqlDb" + ResourceTypeSqlMi ResourceType = "SqlMi" + ResourceTypeSqlVM ResourceType = "SqlVm" +) + +func PossibleValuesForResourceType() []string { + return []string{ + string(ResourceTypeMongoToCosmosDbMongo), + string(ResourceTypeSqlDb), + string(ResourceTypeSqlMi), + string(ResourceTypeSqlVM), + } +} + +func (s *ResourceType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResourceType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResourceType(input string) (*ResourceType, error) { + vals := map[string]ResourceType{ + "mongotocosmosdbmongo": ResourceTypeMongoToCosmosDbMongo, + "sqldb": 
ResourceTypeSqlDb, + "sqlmi": ResourceTypeSqlMi, + "sqlvm": ResourceTypeSqlVM, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResourceType(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/id_migrationservice.go b/resource-manager/datamigration/2025-06-30/migrationservices/id_migrationservice.go new file mode 100644 index 00000000000..aa1d897c4e5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/id_migrationservice.go @@ -0,0 +1,130 @@ +package migrationservices + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&MigrationServiceId{}) +} + +var _ resourceids.ResourceId = &MigrationServiceId{} + +// MigrationServiceId is a struct representing the Resource ID for a Migration Service +type MigrationServiceId struct { + SubscriptionId string + ResourceGroupName string + MigrationServiceName string +} + +// NewMigrationServiceID returns a new MigrationServiceId struct +func NewMigrationServiceID(subscriptionId string, resourceGroupName string, migrationServiceName string) MigrationServiceId { + return MigrationServiceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + MigrationServiceName: migrationServiceName, + } +} + +// ParseMigrationServiceID parses 'input' into a MigrationServiceId +func ParseMigrationServiceID(input string) (*MigrationServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&MigrationServiceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", 
input, err) + } + + id := MigrationServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseMigrationServiceIDInsensitively parses 'input' case-insensitively into a MigrationServiceId +// note: this method should only be used for API response data and not user input +func ParseMigrationServiceIDInsensitively(input string) (*MigrationServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&MigrationServiceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := MigrationServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *MigrationServiceId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.MigrationServiceName, ok = input.Parsed["migrationServiceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "migrationServiceName", input) + } + + return nil +} + +// ValidateMigrationServiceID checks that 'input' can be parsed as a Migration Service ID +func ValidateMigrationServiceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseMigrationServiceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Migration Service ID +func (id MigrationServiceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/migrationServices/%s" + return 
fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.MigrationServiceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Migration Service ID +func (id MigrationServiceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticMigrationServices", "migrationServices", "migrationServices"), + resourceids.UserSpecifiedSegment("migrationServiceName", "migrationServiceName"), + } +} + +// String returns a human-readable description of this Migration Service ID +func (id MigrationServiceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Migration Service Name: %q", id.MigrationServiceName), + } + return fmt.Sprintf("Migration Service (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/id_migrationservice_test.go b/resource-manager/datamigration/2025-06-30/migrationservices/id_migrationservice_test.go new file mode 100644 index 00000000000..00ba74f4e6f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/id_migrationservice_test.go @@ -0,0 +1,282 @@ +package migrationservices + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &MigrationServiceId{} + +func TestNewMigrationServiceID(t *testing.T) { + id := NewMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "migrationServiceName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.MigrationServiceName != "migrationServiceName" { + t.Fatalf("Expected %q but got %q for Segment 'MigrationServiceName'", id.MigrationServiceName, "migrationServiceName") + } +} + +func TestFormatMigrationServiceID(t *testing.T) { + actual := NewMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "migrationServiceName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/migrationServices/migrationServiceName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseMigrationServiceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *MigrationServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/migrationServices", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/migrationServices/migrationServiceName", + Expected: &MigrationServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + MigrationServiceName: "migrationServiceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/migrationServices/migrationServiceName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseMigrationServiceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.MigrationServiceName != 
v.Expected.MigrationServiceName { + t.Fatalf("Expected %q but got %q for MigrationServiceName", v.Expected.MigrationServiceName, actual.MigrationServiceName) + } + + } +} + +func TestParseMigrationServiceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *MigrationServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd 
CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/migrationServices", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/mIgRaTiOnSeRvIcEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/migrationServices/migrationServiceName", + Expected: &MigrationServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + MigrationServiceName: "migrationServiceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/migrationServices/migrationServiceName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/mIgRaTiOnSeRvIcEs/mIgRaTiOnSeRvIcEnAmE", + Expected: &MigrationServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + MigrationServiceName: "mIgRaTiOnSeRvIcEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/mIgRaTiOnSeRvIcEs/mIgRaTiOnSeRvIcEnAmE/extra", + Error: true, + }, + } + for _, v := 
range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseMigrationServiceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.MigrationServiceName != v.Expected.MigrationServiceName { + t.Fatalf("Expected %q but got %q for MigrationServiceName", v.Expected.MigrationServiceName, actual.MigrationServiceName) + } + + } +} + +func TestSegmentsForMigrationServiceId(t *testing.T) { + segments := MigrationServiceId{}.Segments() + if len(segments) == 0 { + t.Fatalf("MigrationServiceId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/method_createorupdate.go b/resource-manager/datamigration/2025-06-30/migrationservices/method_createorupdate.go new file mode 100644 index 00000000000..87474a62def --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/method_createorupdate.go @@ -0,0 +1,75 @@ +package migrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *MigrationService +} + +// CreateOrUpdate ... +func (c MigrationServicesClient) CreateOrUpdate(ctx context.Context, id MigrationServiceId, input MigrationService) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c MigrationServicesClient) CreateOrUpdateThenPoll(ctx context.Context, id MigrationServiceId, input MigrationService) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/method_vaultsdelete.go b/resource-manager/datamigration/2025-06-30/migrationservices/method_delete.go similarity index 69% rename from resource-manager/recoveryservices/2025-02-01/openapis/method_vaultsdelete.go rename to 
resource-manager/datamigration/2025-06-30/migrationservices/method_delete.go index 7c57e945c5f..57d5deada46 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/method_vaultsdelete.go +++ b/resource-manager/datamigration/2025-06-30/migrationservices/method_delete.go @@ -1,4 +1,4 @@ -package openapis +package migrationservices import ( "context" @@ -14,14 +14,14 @@ import ( // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. -type VaultsDeleteOperationResponse struct { +type DeleteOperationResponse struct { Poller pollers.Poller HttpResponse *http.Response OData *odata.OData } -// VaultsDelete ... -func (c OpenapisClient) VaultsDelete(ctx context.Context, id VaultId) (result VaultsDeleteOperationResponse, err error) { +// Delete ... +func (c MigrationServicesClient) Delete(ctx context.Context, id MigrationServiceId) (result DeleteOperationResponse, err error) { opts := client.RequestOptions{ ContentType: "application/json; charset=utf-8", ExpectedStatusCodes: []int{ @@ -55,15 +55,15 @@ func (c OpenapisClient) VaultsDelete(ctx context.Context, id VaultId) (result Va return } -// VaultsDeleteThenPoll performs VaultsDelete then polls until it's completed -func (c OpenapisClient) VaultsDeleteThenPoll(ctx context.Context, id VaultId) error { - result, err := c.VaultsDelete(ctx, id) +// DeleteThenPoll performs Delete then polls until it's completed +func (c MigrationServicesClient) DeleteThenPoll(ctx context.Context, id MigrationServiceId) error { + result, err := c.Delete(ctx, id) if err != nil { - return fmt.Errorf("performing VaultsDelete: %+v", err) + return fmt.Errorf("performing Delete: %+v", err) } if err := result.Poller.PollUntilDone(ctx); err != nil { - return fmt.Errorf("polling after VaultsDelete: %+v", err) + return fmt.Errorf("polling after Delete: %+v", err) } return nil diff --git 
a/resource-manager/datamigration/2025-06-30/migrationservices/method_get.go b/resource-manager/datamigration/2025-06-30/migrationservices/method_get.go new file mode 100644 index 00000000000..96381a1affa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/method_get.go @@ -0,0 +1,53 @@ +package migrationservices + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *MigrationService +} + +// Get ... +func (c MigrationServicesClient) Get(ctx context.Context, id MigrationServiceId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model MigrationService + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/method_listbyresourcegroup.go b/resource-manager/datamigration/2025-06-30/migrationservices/method_listbyresourcegroup.go new file mode 100644 index 00000000000..911f65e1c59 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/method_listbyresourcegroup.go @@ -0,0 +1,106 @@ +package migrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + 
"github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]MigrationService +} + +type ListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []MigrationService +} + +type ListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByResourceGroup ... +func (c MigrationServicesClient) ListByResourceGroup(ctx context.Context, id commonids.ResourceGroupId) (result ListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/migrationServices", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]MigrationService `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByResourceGroupComplete retrieves all the results into a single object +func (c MigrationServicesClient) ListByResourceGroupComplete(ctx context.Context, id commonids.ResourceGroupId) (ListByResourceGroupCompleteResult, error) { + return c.ListByResourceGroupCompleteMatchingPredicate(ctx, id, 
MigrationServiceOperationPredicate{}) +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c MigrationServicesClient) ListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id commonids.ResourceGroupId, predicate MigrationServiceOperationPredicate) (result ListByResourceGroupCompleteResult, err error) { + items := make([]MigrationService, 0) + + resp, err := c.ListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByResourceGroupCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/method_listbysubscription.go b/resource-manager/datamigration/2025-06-30/migrationservices/method_listbysubscription.go new file mode 100644 index 00000000000..143b4c126aa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/method_listbysubscription.go @@ -0,0 +1,106 @@ +package migrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListBySubscriptionOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]MigrationService +} + +type ListBySubscriptionCompleteResult struct { + LatestHttpResponse *http.Response + Items []MigrationService +} + +type ListBySubscriptionCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListBySubscriptionCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListBySubscription ... +func (c MigrationServicesClient) ListBySubscription(ctx context.Context, id commonids.SubscriptionId) (result ListBySubscriptionOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListBySubscriptionCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/migrationServices", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]MigrationService `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListBySubscriptionComplete retrieves all the results into a single object +func (c MigrationServicesClient) ListBySubscriptionComplete(ctx context.Context, id commonids.SubscriptionId) (ListBySubscriptionCompleteResult, error) { + return c.ListBySubscriptionCompleteMatchingPredicate(ctx, id, MigrationServiceOperationPredicate{}) +} + +// ListBySubscriptionCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c MigrationServicesClient) ListBySubscriptionCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, 
predicate MigrationServiceOperationPredicate) (result ListBySubscriptionCompleteResult, err error) { + items := make([]MigrationService, 0) + + resp, err := c.ListBySubscription(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListBySubscriptionCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/method_listmigrations.go b/resource-manager/datamigration/2025-06-30/migrationservices/method_listmigrations.go new file mode 100644 index 00000000000..000d441ab69 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/method_listmigrations.go @@ -0,0 +1,105 @@ +package migrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListMigrationsOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]DatabaseMigrationBase +} + +type ListMigrationsCompleteResult struct { + LatestHttpResponse *http.Response + Items []DatabaseMigrationBase +} + +type ListMigrationsCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListMigrationsCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListMigrations ... 
+func (c MigrationServicesClient) ListMigrations(ctx context.Context, id MigrationServiceId) (result ListMigrationsOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListMigrationsCustomPager{}, + Path: fmt.Sprintf("%s/listMigrations", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]DatabaseMigrationBase `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListMigrationsComplete retrieves all the results into a single object +func (c MigrationServicesClient) ListMigrationsComplete(ctx context.Context, id MigrationServiceId) (ListMigrationsCompleteResult, error) { + return c.ListMigrationsCompleteMatchingPredicate(ctx, id, DatabaseMigrationBaseOperationPredicate{}) +} + +// ListMigrationsCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c MigrationServicesClient) ListMigrationsCompleteMatchingPredicate(ctx context.Context, id MigrationServiceId, predicate DatabaseMigrationBaseOperationPredicate) (result ListMigrationsCompleteResult, err error) { + items := make([]DatabaseMigrationBase, 0) + + resp, err := c.ListMigrations(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListMigrationsCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git 
a/resource-manager/datamigration/2025-06-30/migrationservices/method_update.go b/resource-manager/datamigration/2025-06-30/migrationservices/method_update.go new file mode 100644 index 00000000000..37bb4d44ef0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/method_update.go @@ -0,0 +1,75 @@ +package migrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *MigrationService +} + +// Update ... +func (c MigrationServicesClient) Update(ctx context.Context, id MigrationServiceId, input MigrationServiceUpdate) (result UpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UpdateThenPoll performs Update then polls until it's completed +func (c MigrationServicesClient) UpdateThenPoll(ctx context.Context, id MigrationServiceId, input MigrationServiceUpdate) error { + result, err := c.Update(ctx, id, input) + if 
err != nil { + return fmt.Errorf("performing Update: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Update: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_azureblob.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_azureblob.go new file mode 100644 index 00000000000..f73f599cda8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_azureblob.go @@ -0,0 +1,16 @@ +package migrationservices + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AzureBlob struct { + AccountKey *string `json:"accountKey,omitempty"` + AuthType *AuthType `json:"authType,omitempty"` + BlobContainerName *string `json:"blobContainerName,omitempty"` + Identity *identity.LegacySystemAndUserAssignedMap `json:"identity,omitempty"` + StorageAccountResourceId *string `json:"storageAccountResourceId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_backupconfiguration.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_backupconfiguration.go new file mode 100644 index 00000000000..13efb70139f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_backupconfiguration.go @@ -0,0 +1,9 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BackupConfiguration struct { + SourceLocation *SourceLocation `json:"sourceLocation,omitempty"` + TargetLocation *TargetLocation `json:"targetLocation,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_copyprogressdetails.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_copyprogressdetails.go new file mode 100644 index 00000000000..97edd3ecf6d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_copyprogressdetails.go @@ -0,0 +1,36 @@ +package migrationservices + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CopyProgressDetails struct { + CopyDuration *int64 `json:"copyDuration,omitempty"` + CopyStart *string `json:"copyStart,omitempty"` + CopyThroughput *float64 `json:"copyThroughput,omitempty"` + DataRead *int64 `json:"dataRead,omitempty"` + DataWritten *int64 `json:"dataWritten,omitempty"` + ParallelCopyType *string `json:"parallelCopyType,omitempty"` + RowsCopied *int64 `json:"rowsCopied,omitempty"` + RowsRead *int64 `json:"rowsRead,omitempty"` + Status *string `json:"status,omitempty"` + TableName *string `json:"tableName,omitempty"` + UsedParallelCopies *int64 `json:"usedParallelCopies,omitempty"` +} + +func (o *CopyProgressDetails) GetCopyStartAsTime() (*time.Time, error) { + if o.CopyStart == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CopyStart, "2006-01-02T15:04:05Z07:00") +} + +func (o *CopyProgressDetails) SetCopyStartAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CopyStart = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationbase.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationbase.go new file mode 
100644 index 00000000000..912059fc6ca --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationbase.go @@ -0,0 +1,53 @@ +package migrationservices + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseMigrationBase struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties DatabaseMigrationBaseProperties `json:"properties"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} + +var _ json.Unmarshaler = &DatabaseMigrationBase{} + +func (s *DatabaseMigrationBase) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Id = decoded.Id + s.Name = decoded.Name + s.SystemData = decoded.SystemData + s.Type = decoded.Type + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling DatabaseMigrationBase into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["properties"]; ok { + impl, err := UnmarshalDatabaseMigrationBasePropertiesImplementation(v) + if err != nil { + return fmt.Errorf("unmarshaling field 'Properties' for 'DatabaseMigrationBase': %+v", err) + } + s.Properties = impl + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationbaseproperties.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationbaseproperties.go new file mode 
100644 index 00000000000..da95a2780b2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationbaseproperties.go @@ -0,0 +1,92 @@ +package migrationservices + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseMigrationBaseProperties interface { + DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl +} + +var _ DatabaseMigrationBaseProperties = BaseDatabaseMigrationBasePropertiesImpl{} + +type BaseDatabaseMigrationBasePropertiesImpl struct { + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s BaseDatabaseMigrationBasePropertiesImpl) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return s +} + +var _ DatabaseMigrationBaseProperties = RawDatabaseMigrationBasePropertiesImpl{} + +// RawDatabaseMigrationBasePropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawDatabaseMigrationBasePropertiesImpl struct { + databaseMigrationBaseProperties BaseDatabaseMigrationBasePropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawDatabaseMigrationBasePropertiesImpl) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return s.databaseMigrationBaseProperties +} + +func UnmarshalDatabaseMigrationBasePropertiesImplementation(input []byte) (DatabaseMigrationBaseProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationBaseProperties into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["kind"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseMigrationProperties") { + var out DatabaseMigrationProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into DatabaseMigrationProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MongoToCosmosDbMongo") { + var out DatabaseMigrationPropertiesCosmosDbMongo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into DatabaseMigrationPropertiesCosmosDbMongo: %+v", err) + } + return out, nil + } + + var parent BaseDatabaseMigrationBasePropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseDatabaseMigrationBasePropertiesImpl: %+v", err) + } + + return RawDatabaseMigrationBasePropertiesImpl{ + databaseMigrationBaseProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationproperties.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationproperties.go new file mode 100644 index 00000000000..43c928f3bc1 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationproperties.go @@ -0,0 +1,98 @@ +package migrationservices + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ DatabaseMigrationBaseProperties = DatabaseMigrationProperties{} + +type DatabaseMigrationProperties struct { + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + SourceServerName *string `json:"sourceServerName,omitempty"` + SourceSqlConnection *SqlConnectionInformation `json:"sourceSqlConnection,omitempty"` + TargetDatabaseCollation *string `json:"targetDatabaseCollation,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationProperties) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +func (o 
*DatabaseMigrationProperties) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseMigrationProperties) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DatabaseMigrationProperties) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseMigrationProperties) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} + +var _ json.Marshaler = DatabaseMigrationProperties{} + +func (s DatabaseMigrationProperties) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationProperties: %+v", err) + } + + decoded["kind"] = "DatabaseMigrationProperties" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationpropertiescosmosdbmongo.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationpropertiescosmosdbmongo.go new file mode 100644 index 00000000000..a73fbff1376 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationpropertiescosmosdbmongo.go @@ -0,0 +1,97 @@ +package migrationservices + +import ( + "encoding/json" + "fmt" + "time" + + 
"github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesCosmosDbMongo{} + +type DatabaseMigrationPropertiesCosmosDbMongo struct { + CollectionList *[]MongoMigrationCollection `json:"collectionList,omitempty"` + SourceMongoConnection *MongoConnectionInformation `json:"sourceMongoConnection,omitempty"` + TargetMongoConnection *MongoConnectionInformation `json:"targetMongoConnection,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationPropertiesCosmosDbMongo) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +func (o *DatabaseMigrationPropertiesCosmosDbMongo) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseMigrationPropertiesCosmosDbMongo) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DatabaseMigrationPropertiesCosmosDbMongo) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseMigrationPropertiesCosmosDbMongo) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} + +var _ json.Marshaler = DatabaseMigrationPropertiesCosmosDbMongo{} + +func (s DatabaseMigrationPropertiesCosmosDbMongo) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesCosmosDbMongo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationPropertiesCosmosDbMongo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesCosmosDbMongo: %+v", err) + } + + decoded["kind"] = "MongoToCosmosDbMongo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesCosmosDbMongo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationpropertiessqldb.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationpropertiessqldb.go new file mode 100644 index 00000000000..692b1f5eb4d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationpropertiessqldb.go @@ -0,0 +1,71 @@ +package migrationservices + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesSqlDb{} + +type DatabaseMigrationPropertiesSqlDb struct { + MigrationStatusDetails *SqlDbMigrationStatusDetails `json:"migrationStatusDetails,omitempty"` + OfflineConfiguration *SqlDbOfflineConfiguration `json:"offlineConfiguration,omitempty"` + TableList *[]string `json:"tableList,omitempty"` + TargetSqlConnection *SqlConnectionInformation `json:"targetSqlConnection,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationPropertiesSqlDb) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +var _ json.Marshaler = DatabaseMigrationPropertiesSqlDb{} + +func (s DatabaseMigrationPropertiesSqlDb) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesSqlDb + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling 
DatabaseMigrationPropertiesSqlDb: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesSqlDb: %+v", err) + } + + decoded["kind"] = "SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesSqlDb: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationpropertiessqlmi.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationpropertiessqlmi.go new file mode 100644 index 00000000000..20f3a183d08 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationpropertiessqlmi.go @@ -0,0 +1,70 @@ +package migrationservices + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesSqlMi{} + +type DatabaseMigrationPropertiesSqlMi struct { + BackupConfiguration *BackupConfiguration `json:"backupConfiguration,omitempty"` + MigrationStatusDetails *MigrationStatusDetails `json:"migrationStatusDetails,omitempty"` + OfflineConfiguration *OfflineConfiguration `json:"offlineConfiguration,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationPropertiesSqlMi) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +var _ json.Marshaler = DatabaseMigrationPropertiesSqlMi{} + +func (s DatabaseMigrationPropertiesSqlMi) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesSqlMi + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationPropertiesSqlMi: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != 
nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesSqlMi: %+v", err) + } + + decoded["kind"] = "SqlMi" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesSqlMi: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationpropertiessqlvm.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationpropertiessqlvm.go new file mode 100644 index 00000000000..bff79dd0a53 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_databasemigrationpropertiessqlvm.go @@ -0,0 +1,70 @@ +package migrationservices + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesSqlVM{} + +type DatabaseMigrationPropertiesSqlVM struct { + BackupConfiguration *BackupConfiguration `json:"backupConfiguration,omitempty"` + MigrationStatusDetails *MigrationStatusDetails `json:"migrationStatusDetails,omitempty"` + OfflineConfiguration *OfflineConfiguration `json:"offlineConfiguration,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s 
DatabaseMigrationPropertiesSqlVM) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +var _ json.Marshaler = DatabaseMigrationPropertiesSqlVM{} + +func (s DatabaseMigrationPropertiesSqlVM) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesSqlVM + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationPropertiesSqlVM: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesSqlVM: %+v", err) + } + + decoded["kind"] = "SqlVm" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesSqlVM: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_errorinfo.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_errorinfo.go new file mode 100644 index 00000000000..9b78c116e8f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_errorinfo.go @@ -0,0 +1,9 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ErrorInfo struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_migrationservice.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_migrationservice.go new file mode 100644 index 00000000000..c784df2cbd6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_migrationservice.go @@ -0,0 +1,18 @@ +package migrationservices + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationService struct { + Id *string `json:"id,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *MigrationServiceProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_migrationserviceproperties.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_migrationserviceproperties.go new file mode 100644 index 00000000000..276397092be --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_migrationserviceproperties.go @@ -0,0 +1,9 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationServiceProperties struct { + IntegrationRuntimeState *string `json:"integrationRuntimeState,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_migrationserviceupdate.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_migrationserviceupdate.go new file mode 100644 index 00000000000..31f867e565d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_migrationserviceupdate.go @@ -0,0 +1,8 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationServiceUpdate struct { + Tags *map[string]string `json:"tags,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_migrationstatusdetails.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_migrationstatusdetails.go new file mode 100644 index 00000000000..a37118efaff --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_migrationstatusdetails.go @@ -0,0 +1,20 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationStatusDetails struct { + ActiveBackupSets *[]SqlBackupSetInfo `json:"activeBackupSets,omitempty"` + BlobContainerName *string `json:"blobContainerName,omitempty"` + CompleteRestoreErrorMessage *string `json:"completeRestoreErrorMessage,omitempty"` + CurrentRestoringFilename *string `json:"currentRestoringFilename,omitempty"` + FileUploadBlockingErrors *[]string `json:"fileUploadBlockingErrors,omitempty"` + FullBackupSetInfo *SqlBackupSetInfo `json:"fullBackupSetInfo,omitempty"` + InvalidFiles *[]string `json:"invalidFiles,omitempty"` + IsFullBackupRestored *bool `json:"isFullBackupRestored,omitempty"` + LastRestoredBackupSetInfo *SqlBackupSetInfo `json:"lastRestoredBackupSetInfo,omitempty"` + LastRestoredFilename *string `json:"lastRestoredFilename,omitempty"` + MigrationState *string `json:"migrationState,omitempty"` + PendingLogBackupsCount *int64 `json:"pendingLogBackupsCount,omitempty"` + RestoreBlockingReason *string `json:"restoreBlockingReason,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_mongoconnectioninformation.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_mongoconnectioninformation.go new file mode 100644 index 00000000000..af2c73f921d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_mongoconnectioninformation.go @@ -0,0 +1,13 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoConnectionInformation struct { + ConnectionString *string `json:"connectionString,omitempty"` + Host *string `json:"host,omitempty"` + Password *string `json:"password,omitempty"` + Port *int64 `json:"port,omitempty"` + UseSsl *bool `json:"useSsl,omitempty"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_mongomigrationcollection.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_mongomigrationcollection.go new file mode 100644 index 00000000000..606e5f7c4d2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_mongomigrationcollection.go @@ -0,0 +1,12 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoMigrationCollection struct { + MigrationProgressDetails *MongoMigrationProgressDetails `json:"migrationProgressDetails,omitempty"` + SourceCollection *string `json:"sourceCollection,omitempty"` + SourceDatabase *string `json:"sourceDatabase,omitempty"` + TargetCollection *string `json:"targetCollection,omitempty"` + TargetDatabase *string `json:"targetDatabase,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_mongomigrationprogressdetails.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_mongomigrationprogressdetails.go new file mode 100644 index 00000000000..cba12ea87ae --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_mongomigrationprogressdetails.go @@ -0,0 +1,12 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoMigrationProgressDetails struct { + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + MigrationError *string `json:"migrationError,omitempty"` + MigrationStatus *MongoMigrationStatus `json:"migrationStatus,omitempty"` + ProcessedDocumentCount *int64 `json:"processedDocumentCount,omitempty"` + SourceDocumentCount *int64 `json:"sourceDocumentCount,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_offlineconfiguration.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_offlineconfiguration.go new file mode 100644 index 00000000000..08202c6575c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_offlineconfiguration.go @@ -0,0 +1,9 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type OfflineConfiguration struct { + LastBackupName *string `json:"lastBackupName,omitempty"` + Offline *bool `json:"offline,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_sourcelocation.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_sourcelocation.go new file mode 100644 index 00000000000..ddd3e1645a0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_sourcelocation.go @@ -0,0 +1,10 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SourceLocation struct { + AzureBlob *AzureBlob `json:"azureBlob,omitempty"` + FileShare *SqlFileShare `json:"fileShare,omitempty"` + FileStorageType *string `json:"fileStorageType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_sqlbackupfileinfo.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_sqlbackupfileinfo.go new file mode 100644 index 00000000000..0bfbdfbae1e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_sqlbackupfileinfo.go @@ -0,0 +1,15 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlBackupFileInfo struct { + CopyDuration *int64 `json:"copyDuration,omitempty"` + CopyThroughput *float64 `json:"copyThroughput,omitempty"` + DataRead *int64 `json:"dataRead,omitempty"` + DataWritten *int64 `json:"dataWritten,omitempty"` + FamilySequenceNumber *int64 `json:"familySequenceNumber,omitempty"` + FileName *string `json:"fileName,omitempty"` + Status *string `json:"status,omitempty"` + TotalSize *int64 `json:"totalSize,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_sqlbackupsetinfo.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_sqlbackupsetinfo.go new file mode 100644 index 00000000000..91db39398f3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_sqlbackupsetinfo.go @@ -0,0 +1,48 @@ +package migrationservices + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlBackupSetInfo struct { + BackupFinishDate *string `json:"backupFinishDate,omitempty"` + BackupSetId *string `json:"backupSetId,omitempty"` + BackupStartDate *string `json:"backupStartDate,omitempty"` + BackupType *string `json:"backupType,omitempty"` + FamilyCount *int64 `json:"familyCount,omitempty"` + FirstLSN *string `json:"firstLSN,omitempty"` + HasBackupChecksums *bool `json:"hasBackupChecksums,omitempty"` + IgnoreReasons *[]string `json:"ignoreReasons,omitempty"` + IsBackupRestored *bool `json:"isBackupRestored,omitempty"` + LastLSN *string `json:"lastLSN,omitempty"` + ListOfBackupFiles *[]SqlBackupFileInfo `json:"listOfBackupFiles,omitempty"` +} + +func (o *SqlBackupSetInfo) GetBackupFinishDateAsTime() (*time.Time, error) { + if o.BackupFinishDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *SqlBackupSetInfo) SetBackupFinishDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishDate = &formatted +} + +func (o *SqlBackupSetInfo) GetBackupStartDateAsTime() (*time.Time, error) { + if o.BackupStartDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupStartDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *SqlBackupSetInfo) SetBackupStartDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupStartDate = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_sqlconnectioninformation.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_sqlconnectioninformation.go new file mode 100644 index 00000000000..1ce4866f6e0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_sqlconnectioninformation.go @@ -0,0 +1,13 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type SqlConnectionInformation struct { + Authentication *string `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Password *string `json:"password,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_sqldbmigrationstatusdetails.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_sqldbmigrationstatusdetails.go new file mode 100644 index 00000000000..1acb3ed6316 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_sqldbmigrationstatusdetails.go @@ -0,0 +1,10 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlDbMigrationStatusDetails struct { + ListOfCopyProgressDetails *[]CopyProgressDetails `json:"listOfCopyProgressDetails,omitempty"` + MigrationState *string `json:"migrationState,omitempty"` + SqlDataCopyErrors *[]string `json:"sqlDataCopyErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_sqldbofflineconfiguration.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_sqldbofflineconfiguration.go new file mode 100644 index 00000000000..3f0e8805070 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_sqldbofflineconfiguration.go @@ -0,0 +1,8 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlDbOfflineConfiguration struct { + Offline *bool `json:"offline,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_sqlfileshare.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_sqlfileshare.go new file mode 100644 index 00000000000..ddd8cb9de0a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_sqlfileshare.go @@ -0,0 +1,10 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlFileShare struct { + Password *string `json:"password,omitempty"` + Path *string `json:"path,omitempty"` + Username *string `json:"username,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/model_targetlocation.go b/resource-manager/datamigration/2025-06-30/migrationservices/model_targetlocation.go new file mode 100644 index 00000000000..616a4f97dda --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/model_targetlocation.go @@ -0,0 +1,9 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TargetLocation struct { + AccountKey *string `json:"accountKey,omitempty"` + StorageAccountResourceId *string `json:"storageAccountResourceId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/predicates.go b/resource-manager/datamigration/2025-06-30/migrationservices/predicates.go new file mode 100644 index 00000000000..41b16e58cda --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/predicates.go @@ -0,0 +1,55 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type DatabaseMigrationBaseOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p DatabaseMigrationBaseOperationPredicate) Matches(input DatabaseMigrationBase) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type MigrationServiceOperationPredicate struct { + Id *string + Location *string + Name *string + Type *string +} + +func (p MigrationServiceOperationPredicate) Matches(input MigrationService) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Location != nil && (input.Location == nil || *p.Location != *input.Location) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/datamigration/2025-06-30/migrationservices/version.go b/resource-manager/datamigration/2025-06-30/migrationservices/version.go new file mode 100644 index 00000000000..370ad07b0d7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/migrationservices/version.go @@ -0,0 +1,10 @@ +package migrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/migrationservices/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/patch/README.md b/resource-manager/datamigration/2025-06-30/patch/README.md new file mode 100644 index 00000000000..cc92a6c8ab4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/README.md @@ -0,0 +1,121 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/patch` Documentation + +The `patch` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/patch" +``` + + +### Client Initialization + +```go +client := patch.NewPATCHClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `PATCHClient.FilesUpdate` + +```go +ctx := context.TODO() +id := patch.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +payload := patch.ProjectFile{ + // ... +} + + +read, err := client.FilesUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `PATCHClient.ProjectsUpdate` + +```go +ctx := context.TODO() +id := patch.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +payload := patch.Project{ + // ... 
+} + + +read, err := client.ProjectsUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `PATCHClient.ServiceTasksUpdate` + +```go +ctx := context.TODO() +id := patch.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +payload := patch.ProjectTask{ + // ... +} + + +read, err := client.ServiceTasksUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `PATCHClient.ServicesUpdate` + +```go +ctx := context.TODO() +id := patch.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +payload := patch.DataMigrationService{ + // ... +} + + +if err := client.ServicesUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `PATCHClient.TasksUpdate` + +```go +ctx := context.TODO() +id := patch.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +payload := patch.ProjectTask{ + // ... +} + + +read, err := client.TasksUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/datamigration/2025-06-30/patch/client.go b/resource-manager/datamigration/2025-06-30/patch/client.go new file mode 100644 index 00000000000..80052bdd76d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/client.go @@ -0,0 +1,26 @@ +package patch + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type PATCHClient struct { + Client *resourcemanager.Client +} + +func NewPATCHClientWithBaseURI(sdkApi sdkEnv.Api) (*PATCHClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "patch", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating PATCHClient: %+v", err) + } + + return &PATCHClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/constants.go b/resource-manager/datamigration/2025-06-30/patch/constants.go new file mode 100644 index 00000000000..e68137e8541 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/constants.go @@ -0,0 +1,2314 @@ +package patch + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AuthenticationType string + +const ( + AuthenticationTypeActiveDirectoryIntegrated AuthenticationType = "ActiveDirectoryIntegrated" + AuthenticationTypeActiveDirectoryPassword AuthenticationType = "ActiveDirectoryPassword" + AuthenticationTypeNone AuthenticationType = "None" + AuthenticationTypeSqlAuthentication AuthenticationType = "SqlAuthentication" + AuthenticationTypeWindowsAuthentication AuthenticationType = "WindowsAuthentication" +) + +func PossibleValuesForAuthenticationType() []string { + return []string{ + string(AuthenticationTypeActiveDirectoryIntegrated), + string(AuthenticationTypeActiveDirectoryPassword), + string(AuthenticationTypeNone), + string(AuthenticationTypeSqlAuthentication), + string(AuthenticationTypeWindowsAuthentication), + } +} + +func (s *AuthenticationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthenticationType(decoded) + if err != nil { + return 
fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthenticationType(input string) (*AuthenticationType, error) { + vals := map[string]AuthenticationType{ + "activedirectoryintegrated": AuthenticationTypeActiveDirectoryIntegrated, + "activedirectorypassword": AuthenticationTypeActiveDirectoryPassword, + "none": AuthenticationTypeNone, + "sqlauthentication": AuthenticationTypeSqlAuthentication, + "windowsauthentication": AuthenticationTypeWindowsAuthentication, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthenticationType(input) + return &out, nil +} + +type BackupFileStatus string + +const ( + BackupFileStatusArrived BackupFileStatus = "Arrived" + BackupFileStatusCancelled BackupFileStatus = "Cancelled" + BackupFileStatusQueued BackupFileStatus = "Queued" + BackupFileStatusRestored BackupFileStatus = "Restored" + BackupFileStatusRestoring BackupFileStatus = "Restoring" + BackupFileStatusUploaded BackupFileStatus = "Uploaded" + BackupFileStatusUploading BackupFileStatus = "Uploading" +) + +func PossibleValuesForBackupFileStatus() []string { + return []string{ + string(BackupFileStatusArrived), + string(BackupFileStatusCancelled), + string(BackupFileStatusQueued), + string(BackupFileStatusRestored), + string(BackupFileStatusRestoring), + string(BackupFileStatusUploaded), + string(BackupFileStatusUploading), + } +} + +func (s *BackupFileStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupFileStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupFileStatus(input string) (*BackupFileStatus, error) { + vals := map[string]BackupFileStatus{ + "arrived": BackupFileStatusArrived, + "cancelled": 
BackupFileStatusCancelled, + "queued": BackupFileStatusQueued, + "restored": BackupFileStatusRestored, + "restoring": BackupFileStatusRestoring, + "uploaded": BackupFileStatusUploaded, + "uploading": BackupFileStatusUploading, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupFileStatus(input) + return &out, nil +} + +type BackupMode string + +const ( + BackupModeCreateBackup BackupMode = "CreateBackup" + BackupModeExistingBackup BackupMode = "ExistingBackup" +) + +func PossibleValuesForBackupMode() []string { + return []string{ + string(BackupModeCreateBackup), + string(BackupModeExistingBackup), + } +} + +func (s *BackupMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupMode(input string) (*BackupMode, error) { + vals := map[string]BackupMode{ + "createbackup": BackupModeCreateBackup, + "existingbackup": BackupModeExistingBackup, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupMode(input) + return &out, nil +} + +type BackupType string + +const ( + BackupTypeDatabase BackupType = "Database" + BackupTypeDifferentialDatabase BackupType = "DifferentialDatabase" + BackupTypeDifferentialFile BackupType = "DifferentialFile" + BackupTypeDifferentialPartial BackupType = "DifferentialPartial" + BackupTypeFile BackupType = "File" + BackupTypePartial BackupType = "Partial" + BackupTypeTransactionLog BackupType = "TransactionLog" +) + +func PossibleValuesForBackupType() []string { + return []string{ + string(BackupTypeDatabase), + string(BackupTypeDifferentialDatabase), + 
string(BackupTypeDifferentialFile), + string(BackupTypeDifferentialPartial), + string(BackupTypeFile), + string(BackupTypePartial), + string(BackupTypeTransactionLog), + } +} + +func (s *BackupType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupType(input string) (*BackupType, error) { + vals := map[string]BackupType{ + "database": BackupTypeDatabase, + "differentialdatabase": BackupTypeDifferentialDatabase, + "differentialfile": BackupTypeDifferentialFile, + "differentialpartial": BackupTypeDifferentialPartial, + "file": BackupTypeFile, + "partial": BackupTypePartial, + "transactionlog": BackupTypeTransactionLog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupType(input) + return &out, nil +} + +type CommandState string + +const ( + CommandStateAccepted CommandState = "Accepted" + CommandStateFailed CommandState = "Failed" + CommandStateRunning CommandState = "Running" + CommandStateSucceeded CommandState = "Succeeded" + CommandStateUnknown CommandState = "Unknown" +) + +func PossibleValuesForCommandState() []string { + return []string{ + string(CommandStateAccepted), + string(CommandStateFailed), + string(CommandStateRunning), + string(CommandStateSucceeded), + string(CommandStateUnknown), + } +} + +func (s *CommandState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCommandState(input string) (*CommandState, 
error) { + vals := map[string]CommandState{ + "accepted": CommandStateAccepted, + "failed": CommandStateFailed, + "running": CommandStateRunning, + "succeeded": CommandStateSucceeded, + "unknown": CommandStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CommandState(input) + return &out, nil +} + +type CommandType string + +const ( + CommandTypeCancel CommandType = "cancel" + CommandTypeFinish CommandType = "finish" + CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete CommandType = "Migrate.SqlServer.AzureDbSqlMi.Complete" + CommandTypeMigratePointSyncPointCompletePointDatabase CommandType = "Migrate.Sync.Complete.Database" + CommandTypeRestart CommandType = "restart" +) + +func PossibleValuesForCommandType() []string { + return []string{ + string(CommandTypeCancel), + string(CommandTypeFinish), + string(CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete), + string(CommandTypeMigratePointSyncPointCompletePointDatabase), + string(CommandTypeRestart), + } +} + +func (s *CommandType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCommandType(input string) (*CommandType, error) { + vals := map[string]CommandType{ + "cancel": CommandTypeCancel, + "finish": CommandTypeFinish, + "migrate.sqlserver.azuredbsqlmi.complete": CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete, + "migrate.sync.complete.database": CommandTypeMigratePointSyncPointCompletePointDatabase, + "restart": CommandTypeRestart, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := 
CommandType(input) + return &out, nil +} + +type DatabaseCompatLevel string + +const ( + DatabaseCompatLevelCompatLevelEightZero DatabaseCompatLevel = "CompatLevel80" + DatabaseCompatLevelCompatLevelNineZero DatabaseCompatLevel = "CompatLevel90" + DatabaseCompatLevelCompatLevelOneFourZero DatabaseCompatLevel = "CompatLevel140" + DatabaseCompatLevelCompatLevelOneHundred DatabaseCompatLevel = "CompatLevel100" + DatabaseCompatLevelCompatLevelOneOneZero DatabaseCompatLevel = "CompatLevel110" + DatabaseCompatLevelCompatLevelOneThreeZero DatabaseCompatLevel = "CompatLevel130" + DatabaseCompatLevelCompatLevelOneTwoZero DatabaseCompatLevel = "CompatLevel120" +) + +func PossibleValuesForDatabaseCompatLevel() []string { + return []string{ + string(DatabaseCompatLevelCompatLevelEightZero), + string(DatabaseCompatLevelCompatLevelNineZero), + string(DatabaseCompatLevelCompatLevelOneFourZero), + string(DatabaseCompatLevelCompatLevelOneHundred), + string(DatabaseCompatLevelCompatLevelOneOneZero), + string(DatabaseCompatLevelCompatLevelOneThreeZero), + string(DatabaseCompatLevelCompatLevelOneTwoZero), + } +} + +func (s *DatabaseCompatLevel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseCompatLevel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseCompatLevel(input string) (*DatabaseCompatLevel, error) { + vals := map[string]DatabaseCompatLevel{ + "compatlevel80": DatabaseCompatLevelCompatLevelEightZero, + "compatlevel90": DatabaseCompatLevelCompatLevelNineZero, + "compatlevel140": DatabaseCompatLevelCompatLevelOneFourZero, + "compatlevel100": DatabaseCompatLevelCompatLevelOneHundred, + "compatlevel110": DatabaseCompatLevelCompatLevelOneOneZero, + "compatlevel130": DatabaseCompatLevelCompatLevelOneThreeZero, + "compatlevel120": 
DatabaseCompatLevelCompatLevelOneTwoZero, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseCompatLevel(input) + return &out, nil +} + +type DatabaseFileType string + +const ( + DatabaseFileTypeFilestream DatabaseFileType = "Filestream" + DatabaseFileTypeFulltext DatabaseFileType = "Fulltext" + DatabaseFileTypeLog DatabaseFileType = "Log" + DatabaseFileTypeNotSupported DatabaseFileType = "NotSupported" + DatabaseFileTypeRows DatabaseFileType = "Rows" +) + +func PossibleValuesForDatabaseFileType() []string { + return []string{ + string(DatabaseFileTypeFilestream), + string(DatabaseFileTypeFulltext), + string(DatabaseFileTypeLog), + string(DatabaseFileTypeNotSupported), + string(DatabaseFileTypeRows), + } +} + +func (s *DatabaseFileType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseFileType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseFileType(input string) (*DatabaseFileType, error) { + vals := map[string]DatabaseFileType{ + "filestream": DatabaseFileTypeFilestream, + "fulltext": DatabaseFileTypeFulltext, + "log": DatabaseFileTypeLog, + "notsupported": DatabaseFileTypeNotSupported, + "rows": DatabaseFileTypeRows, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseFileType(input) + return &out, nil +} + +type DatabaseMigrationStage string + +const ( + DatabaseMigrationStageBackup DatabaseMigrationStage = "Backup" + DatabaseMigrationStageCompleted DatabaseMigrationStage = "Completed" + DatabaseMigrationStageFileCopy DatabaseMigrationStage = "FileCopy" + DatabaseMigrationStageInitialize DatabaseMigrationStage = "Initialize" + 
DatabaseMigrationStageNone DatabaseMigrationStage = "None" + DatabaseMigrationStageRestore DatabaseMigrationStage = "Restore" +) + +func PossibleValuesForDatabaseMigrationStage() []string { + return []string{ + string(DatabaseMigrationStageBackup), + string(DatabaseMigrationStageCompleted), + string(DatabaseMigrationStageFileCopy), + string(DatabaseMigrationStageInitialize), + string(DatabaseMigrationStageNone), + string(DatabaseMigrationStageRestore), + } +} + +func (s *DatabaseMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationStage(input string) (*DatabaseMigrationStage, error) { + vals := map[string]DatabaseMigrationStage{ + "backup": DatabaseMigrationStageBackup, + "completed": DatabaseMigrationStageCompleted, + "filecopy": DatabaseMigrationStageFileCopy, + "initialize": DatabaseMigrationStageInitialize, + "none": DatabaseMigrationStageNone, + "restore": DatabaseMigrationStageRestore, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseMigrationStage(input) + return &out, nil +} + +type DatabaseMigrationState string + +const ( + DatabaseMigrationStateCANCELLED DatabaseMigrationState = "CANCELLED" + DatabaseMigrationStateCOMPLETED DatabaseMigrationState = "COMPLETED" + DatabaseMigrationStateCUTOVERSTART DatabaseMigrationState = "CUTOVER_START" + DatabaseMigrationStateFAILED DatabaseMigrationState = "FAILED" + DatabaseMigrationStateFULLBACKUPUPLOADSTART DatabaseMigrationState = "FULL_BACKUP_UPLOAD_START" + DatabaseMigrationStateINITIAL DatabaseMigrationState = "INITIAL" + DatabaseMigrationStateLOGSHIPPINGSTART DatabaseMigrationState = 
"LOG_SHIPPING_START" + DatabaseMigrationStatePOSTCUTOVERCOMPLETE DatabaseMigrationState = "POST_CUTOVER_COMPLETE" + DatabaseMigrationStateUNDEFINED DatabaseMigrationState = "UNDEFINED" + DatabaseMigrationStateUPLOADLOGFILESSTART DatabaseMigrationState = "UPLOAD_LOG_FILES_START" +) + +func PossibleValuesForDatabaseMigrationState() []string { + return []string{ + string(DatabaseMigrationStateCANCELLED), + string(DatabaseMigrationStateCOMPLETED), + string(DatabaseMigrationStateCUTOVERSTART), + string(DatabaseMigrationStateFAILED), + string(DatabaseMigrationStateFULLBACKUPUPLOADSTART), + string(DatabaseMigrationStateINITIAL), + string(DatabaseMigrationStateLOGSHIPPINGSTART), + string(DatabaseMigrationStatePOSTCUTOVERCOMPLETE), + string(DatabaseMigrationStateUNDEFINED), + string(DatabaseMigrationStateUPLOADLOGFILESSTART), + } +} + +func (s *DatabaseMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationState(input string) (*DatabaseMigrationState, error) { + vals := map[string]DatabaseMigrationState{ + "cancelled": DatabaseMigrationStateCANCELLED, + "completed": DatabaseMigrationStateCOMPLETED, + "cutover_start": DatabaseMigrationStateCUTOVERSTART, + "failed": DatabaseMigrationStateFAILED, + "full_backup_upload_start": DatabaseMigrationStateFULLBACKUPUPLOADSTART, + "initial": DatabaseMigrationStateINITIAL, + "log_shipping_start": DatabaseMigrationStateLOGSHIPPINGSTART, + "post_cutover_complete": DatabaseMigrationStatePOSTCUTOVERCOMPLETE, + "undefined": DatabaseMigrationStateUNDEFINED, + "upload_log_files_start": DatabaseMigrationStateUPLOADLOGFILESSTART, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an 
undefined value and best-effort it + out := DatabaseMigrationState(input) + return &out, nil +} + +type DatabaseState string + +const ( + DatabaseStateCopying DatabaseState = "Copying" + DatabaseStateEmergency DatabaseState = "Emergency" + DatabaseStateOffline DatabaseState = "Offline" + DatabaseStateOfflineSecondary DatabaseState = "OfflineSecondary" + DatabaseStateOnline DatabaseState = "Online" + DatabaseStateRecovering DatabaseState = "Recovering" + DatabaseStateRecoveryPending DatabaseState = "RecoveryPending" + DatabaseStateRestoring DatabaseState = "Restoring" + DatabaseStateSuspect DatabaseState = "Suspect" +) + +func PossibleValuesForDatabaseState() []string { + return []string{ + string(DatabaseStateCopying), + string(DatabaseStateEmergency), + string(DatabaseStateOffline), + string(DatabaseStateOfflineSecondary), + string(DatabaseStateOnline), + string(DatabaseStateRecovering), + string(DatabaseStateRecoveryPending), + string(DatabaseStateRestoring), + string(DatabaseStateSuspect), + } +} + +func (s *DatabaseState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseState(input string) (*DatabaseState, error) { + vals := map[string]DatabaseState{ + "copying": DatabaseStateCopying, + "emergency": DatabaseStateEmergency, + "offline": DatabaseStateOffline, + "offlinesecondary": DatabaseStateOfflineSecondary, + "online": DatabaseStateOnline, + "recovering": DatabaseStateRecovering, + "recoverypending": DatabaseStateRecoveryPending, + "restoring": DatabaseStateRestoring, + "suspect": DatabaseStateSuspect, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseState(input) + return 
&out, nil +} + +type LoginMigrationStage string + +const ( + LoginMigrationStageAssignRoleMembership LoginMigrationStage = "AssignRoleMembership" + LoginMigrationStageAssignRoleOwnership LoginMigrationStage = "AssignRoleOwnership" + LoginMigrationStageCompleted LoginMigrationStage = "Completed" + LoginMigrationStageEstablishObjectPermissions LoginMigrationStage = "EstablishObjectPermissions" + LoginMigrationStageEstablishServerPermissions LoginMigrationStage = "EstablishServerPermissions" + LoginMigrationStageEstablishUserMapping LoginMigrationStage = "EstablishUserMapping" + LoginMigrationStageInitialize LoginMigrationStage = "Initialize" + LoginMigrationStageLoginMigration LoginMigrationStage = "LoginMigration" + LoginMigrationStageNone LoginMigrationStage = "None" +) + +func PossibleValuesForLoginMigrationStage() []string { + return []string{ + string(LoginMigrationStageAssignRoleMembership), + string(LoginMigrationStageAssignRoleOwnership), + string(LoginMigrationStageCompleted), + string(LoginMigrationStageEstablishObjectPermissions), + string(LoginMigrationStageEstablishServerPermissions), + string(LoginMigrationStageEstablishUserMapping), + string(LoginMigrationStageInitialize), + string(LoginMigrationStageLoginMigration), + string(LoginMigrationStageNone), + } +} + +func (s *LoginMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLoginMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLoginMigrationStage(input string) (*LoginMigrationStage, error) { + vals := map[string]LoginMigrationStage{ + "assignrolemembership": LoginMigrationStageAssignRoleMembership, + "assignroleownership": LoginMigrationStageAssignRoleOwnership, + "completed": LoginMigrationStageCompleted, + "establishobjectpermissions": 
LoginMigrationStageEstablishObjectPermissions, + "establishserverpermissions": LoginMigrationStageEstablishServerPermissions, + "establishusermapping": LoginMigrationStageEstablishUserMapping, + "initialize": LoginMigrationStageInitialize, + "loginmigration": LoginMigrationStageLoginMigration, + "none": LoginMigrationStageNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LoginMigrationStage(input) + return &out, nil +} + +type LoginType string + +const ( + LoginTypeAsymmetricKey LoginType = "AsymmetricKey" + LoginTypeCertificate LoginType = "Certificate" + LoginTypeExternalGroup LoginType = "ExternalGroup" + LoginTypeExternalUser LoginType = "ExternalUser" + LoginTypeSqlLogin LoginType = "SqlLogin" + LoginTypeWindowsGroup LoginType = "WindowsGroup" + LoginTypeWindowsUser LoginType = "WindowsUser" +) + +func PossibleValuesForLoginType() []string { + return []string{ + string(LoginTypeAsymmetricKey), + string(LoginTypeCertificate), + string(LoginTypeExternalGroup), + string(LoginTypeExternalUser), + string(LoginTypeSqlLogin), + string(LoginTypeWindowsGroup), + string(LoginTypeWindowsUser), + } +} + +func (s *LoginType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLoginType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLoginType(input string) (*LoginType, error) { + vals := map[string]LoginType{ + "asymmetrickey": LoginTypeAsymmetricKey, + "certificate": LoginTypeCertificate, + "externalgroup": LoginTypeExternalGroup, + "externaluser": LoginTypeExternalUser, + "sqllogin": LoginTypeSqlLogin, + "windowsgroup": LoginTypeWindowsGroup, + "windowsuser": LoginTypeWindowsUser, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // 
otherwise presume it's an undefined value and best-effort it + out := LoginType(input) + return &out, nil +} + +type MigrationState string + +const ( + MigrationStateCompleted MigrationState = "Completed" + MigrationStateFailed MigrationState = "Failed" + MigrationStateInProgress MigrationState = "InProgress" + MigrationStateNone MigrationState = "None" + MigrationStateSkipped MigrationState = "Skipped" + MigrationStateStopped MigrationState = "Stopped" + MigrationStateWarning MigrationState = "Warning" +) + +func PossibleValuesForMigrationState() []string { + return []string{ + string(MigrationStateCompleted), + string(MigrationStateFailed), + string(MigrationStateInProgress), + string(MigrationStateNone), + string(MigrationStateSkipped), + string(MigrationStateStopped), + string(MigrationStateWarning), + } +} + +func (s *MigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMigrationState(input string) (*MigrationState, error) { + vals := map[string]MigrationState{ + "completed": MigrationStateCompleted, + "failed": MigrationStateFailed, + "inprogress": MigrationStateInProgress, + "none": MigrationStateNone, + "skipped": MigrationStateSkipped, + "stopped": MigrationStateStopped, + "warning": MigrationStateWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MigrationState(input) + return &out, nil +} + +type MigrationStatus string + +const ( + MigrationStatusCompleted MigrationStatus = "Completed" + MigrationStatusCompletedWithWarnings MigrationStatus = "CompletedWithWarnings" + MigrationStatusConfigured MigrationStatus = "Configured" + MigrationStatusConnecting MigrationStatus 
= "Connecting" + MigrationStatusDefault MigrationStatus = "Default" + MigrationStatusError MigrationStatus = "Error" + MigrationStatusRunning MigrationStatus = "Running" + MigrationStatusSelectLogins MigrationStatus = "SelectLogins" + MigrationStatusSourceAndTargetSelected MigrationStatus = "SourceAndTargetSelected" + MigrationStatusStopped MigrationStatus = "Stopped" +) + +func PossibleValuesForMigrationStatus() []string { + return []string{ + string(MigrationStatusCompleted), + string(MigrationStatusCompletedWithWarnings), + string(MigrationStatusConfigured), + string(MigrationStatusConnecting), + string(MigrationStatusDefault), + string(MigrationStatusError), + string(MigrationStatusRunning), + string(MigrationStatusSelectLogins), + string(MigrationStatusSourceAndTargetSelected), + string(MigrationStatusStopped), + } +} + +func (s *MigrationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMigrationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMigrationStatus(input string) (*MigrationStatus, error) { + vals := map[string]MigrationStatus{ + "completed": MigrationStatusCompleted, + "completedwithwarnings": MigrationStatusCompletedWithWarnings, + "configured": MigrationStatusConfigured, + "connecting": MigrationStatusConnecting, + "default": MigrationStatusDefault, + "error": MigrationStatusError, + "running": MigrationStatusRunning, + "selectlogins": MigrationStatusSelectLogins, + "sourceandtargetselected": MigrationStatusSourceAndTargetSelected, + "stopped": MigrationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MigrationStatus(input) + return &out, nil +} + +type MongoDbClusterType string + +const ( + 
MongoDbClusterTypeBlobContainer MongoDbClusterType = "BlobContainer" + MongoDbClusterTypeCosmosDb MongoDbClusterType = "CosmosDb" + MongoDbClusterTypeMongoDb MongoDbClusterType = "MongoDb" +) + +func PossibleValuesForMongoDbClusterType() []string { + return []string{ + string(MongoDbClusterTypeBlobContainer), + string(MongoDbClusterTypeCosmosDb), + string(MongoDbClusterTypeMongoDb), + } +} + +func (s *MongoDbClusterType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbClusterType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbClusterType(input string) (*MongoDbClusterType, error) { + vals := map[string]MongoDbClusterType{ + "blobcontainer": MongoDbClusterTypeBlobContainer, + "cosmosdb": MongoDbClusterTypeCosmosDb, + "mongodb": MongoDbClusterTypeMongoDb, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbClusterType(input) + return &out, nil +} + +type MongoDbErrorType string + +const ( + MongoDbErrorTypeError MongoDbErrorType = "Error" + MongoDbErrorTypeValidationError MongoDbErrorType = "ValidationError" + MongoDbErrorTypeWarning MongoDbErrorType = "Warning" +) + +func PossibleValuesForMongoDbErrorType() []string { + return []string{ + string(MongoDbErrorTypeError), + string(MongoDbErrorTypeValidationError), + string(MongoDbErrorTypeWarning), + } +} + +func (s *MongoDbErrorType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbErrorType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbErrorType(input string) 
(*MongoDbErrorType, error) { + vals := map[string]MongoDbErrorType{ + "error": MongoDbErrorTypeError, + "validationerror": MongoDbErrorTypeValidationError, + "warning": MongoDbErrorTypeWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbErrorType(input) + return &out, nil +} + +type MongoDbMigrationState string + +const ( + MongoDbMigrationStateCanceled MongoDbMigrationState = "Canceled" + MongoDbMigrationStateComplete MongoDbMigrationState = "Complete" + MongoDbMigrationStateCopying MongoDbMigrationState = "Copying" + MongoDbMigrationStateFailed MongoDbMigrationState = "Failed" + MongoDbMigrationStateFinalizing MongoDbMigrationState = "Finalizing" + MongoDbMigrationStateInitialReplay MongoDbMigrationState = "InitialReplay" + MongoDbMigrationStateInitializing MongoDbMigrationState = "Initializing" + MongoDbMigrationStateNotStarted MongoDbMigrationState = "NotStarted" + MongoDbMigrationStateReplaying MongoDbMigrationState = "Replaying" + MongoDbMigrationStateRestarting MongoDbMigrationState = "Restarting" + MongoDbMigrationStateValidatingInput MongoDbMigrationState = "ValidatingInput" +) + +func PossibleValuesForMongoDbMigrationState() []string { + return []string{ + string(MongoDbMigrationStateCanceled), + string(MongoDbMigrationStateComplete), + string(MongoDbMigrationStateCopying), + string(MongoDbMigrationStateFailed), + string(MongoDbMigrationStateFinalizing), + string(MongoDbMigrationStateInitialReplay), + string(MongoDbMigrationStateInitializing), + string(MongoDbMigrationStateNotStarted), + string(MongoDbMigrationStateReplaying), + string(MongoDbMigrationStateRestarting), + string(MongoDbMigrationStateValidatingInput), + } +} + +func (s *MongoDbMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := 
parseMongoDbMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbMigrationState(input string) (*MongoDbMigrationState, error) { + vals := map[string]MongoDbMigrationState{ + "canceled": MongoDbMigrationStateCanceled, + "complete": MongoDbMigrationStateComplete, + "copying": MongoDbMigrationStateCopying, + "failed": MongoDbMigrationStateFailed, + "finalizing": MongoDbMigrationStateFinalizing, + "initialreplay": MongoDbMigrationStateInitialReplay, + "initializing": MongoDbMigrationStateInitializing, + "notstarted": MongoDbMigrationStateNotStarted, + "replaying": MongoDbMigrationStateReplaying, + "restarting": MongoDbMigrationStateRestarting, + "validatinginput": MongoDbMigrationStateValidatingInput, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbMigrationState(input) + return &out, nil +} + +type MongoDbReplication string + +const ( + MongoDbReplicationContinuous MongoDbReplication = "Continuous" + MongoDbReplicationDisabled MongoDbReplication = "Disabled" + MongoDbReplicationOneTime MongoDbReplication = "OneTime" +) + +func PossibleValuesForMongoDbReplication() []string { + return []string{ + string(MongoDbReplicationContinuous), + string(MongoDbReplicationDisabled), + string(MongoDbReplicationOneTime), + } +} + +func (s *MongoDbReplication) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbReplication(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbReplication(input string) (*MongoDbReplication, error) { + vals := map[string]MongoDbReplication{ + "continuous": MongoDbReplicationContinuous, + "disabled": MongoDbReplicationDisabled, + "onetime": 
MongoDbReplicationOneTime, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbReplication(input) + return &out, nil +} + +type MongoDbShardKeyOrder string + +const ( + MongoDbShardKeyOrderForward MongoDbShardKeyOrder = "Forward" + MongoDbShardKeyOrderHashed MongoDbShardKeyOrder = "Hashed" + MongoDbShardKeyOrderReverse MongoDbShardKeyOrder = "Reverse" +) + +func PossibleValuesForMongoDbShardKeyOrder() []string { + return []string{ + string(MongoDbShardKeyOrderForward), + string(MongoDbShardKeyOrderHashed), + string(MongoDbShardKeyOrderReverse), + } +} + +func (s *MongoDbShardKeyOrder) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbShardKeyOrder(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbShardKeyOrder(input string) (*MongoDbShardKeyOrder, error) { + vals := map[string]MongoDbShardKeyOrder{ + "forward": MongoDbShardKeyOrderForward, + "hashed": MongoDbShardKeyOrderHashed, + "reverse": MongoDbShardKeyOrderReverse, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbShardKeyOrder(input) + return &out, nil +} + +type MySqlTargetPlatformType string + +const ( + MySqlTargetPlatformTypeAzureDbForMySQL MySqlTargetPlatformType = "AzureDbForMySQL" + MySqlTargetPlatformTypeSqlServer MySqlTargetPlatformType = "SqlServer" +) + +func PossibleValuesForMySqlTargetPlatformType() []string { + return []string{ + string(MySqlTargetPlatformTypeAzureDbForMySQL), + string(MySqlTargetPlatformTypeSqlServer), + } +} + +func (s *MySqlTargetPlatformType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err 
!= nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMySqlTargetPlatformType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMySqlTargetPlatformType(input string) (*MySqlTargetPlatformType, error) { + vals := map[string]MySqlTargetPlatformType{ + "azuredbformysql": MySqlTargetPlatformTypeAzureDbForMySQL, + "sqlserver": MySqlTargetPlatformTypeSqlServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MySqlTargetPlatformType(input) + return &out, nil +} + +type ObjectType string + +const ( + ObjectTypeFunction ObjectType = "Function" + ObjectTypeStoredProcedures ObjectType = "StoredProcedures" + ObjectTypeTable ObjectType = "Table" + ObjectTypeUser ObjectType = "User" + ObjectTypeView ObjectType = "View" +) + +func PossibleValuesForObjectType() []string { + return []string{ + string(ObjectTypeFunction), + string(ObjectTypeStoredProcedures), + string(ObjectTypeTable), + string(ObjectTypeUser), + string(ObjectTypeView), + } +} + +func (s *ObjectType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseObjectType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseObjectType(input string) (*ObjectType, error) { + vals := map[string]ObjectType{ + "function": ObjectTypeFunction, + "storedprocedures": ObjectTypeStoredProcedures, + "table": ObjectTypeTable, + "user": ObjectTypeUser, + "view": ObjectTypeView, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ObjectType(input) + return &out, nil +} + +type ProjectProvisioningState string + +const ( + 
ProjectProvisioningStateDeleting ProjectProvisioningState = "Deleting" + ProjectProvisioningStateSucceeded ProjectProvisioningState = "Succeeded" +) + +func PossibleValuesForProjectProvisioningState() []string { + return []string{ + string(ProjectProvisioningStateDeleting), + string(ProjectProvisioningStateSucceeded), + } +} + +func (s *ProjectProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProjectProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProjectProvisioningState(input string) (*ProjectProvisioningState, error) { + vals := map[string]ProjectProvisioningState{ + "deleting": ProjectProvisioningStateDeleting, + "succeeded": ProjectProvisioningStateSucceeded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProjectProvisioningState(input) + return &out, nil +} + +type ProjectSourcePlatform string + +const ( + ProjectSourcePlatformMongoDb ProjectSourcePlatform = "MongoDb" + ProjectSourcePlatformMySQL ProjectSourcePlatform = "MySQL" + ProjectSourcePlatformPostgreSql ProjectSourcePlatform = "PostgreSql" + ProjectSourcePlatformSQL ProjectSourcePlatform = "SQL" + ProjectSourcePlatformUnknown ProjectSourcePlatform = "Unknown" +) + +func PossibleValuesForProjectSourcePlatform() []string { + return []string{ + string(ProjectSourcePlatformMongoDb), + string(ProjectSourcePlatformMySQL), + string(ProjectSourcePlatformPostgreSql), + string(ProjectSourcePlatformSQL), + string(ProjectSourcePlatformUnknown), + } +} + +func (s *ProjectSourcePlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := 
parseProjectSourcePlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProjectSourcePlatform(input string) (*ProjectSourcePlatform, error) { + vals := map[string]ProjectSourcePlatform{ + "mongodb": ProjectSourcePlatformMongoDb, + "mysql": ProjectSourcePlatformMySQL, + "postgresql": ProjectSourcePlatformPostgreSql, + "sql": ProjectSourcePlatformSQL, + "unknown": ProjectSourcePlatformUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProjectSourcePlatform(input) + return &out, nil +} + +type ProjectTargetPlatform string + +const ( + ProjectTargetPlatformAzureDbForMySql ProjectTargetPlatform = "AzureDbForMySql" + ProjectTargetPlatformAzureDbForPostgreSql ProjectTargetPlatform = "AzureDbForPostgreSql" + ProjectTargetPlatformMongoDb ProjectTargetPlatform = "MongoDb" + ProjectTargetPlatformSQLDB ProjectTargetPlatform = "SQLDB" + ProjectTargetPlatformSQLMI ProjectTargetPlatform = "SQLMI" + ProjectTargetPlatformUnknown ProjectTargetPlatform = "Unknown" +) + +func PossibleValuesForProjectTargetPlatform() []string { + return []string{ + string(ProjectTargetPlatformAzureDbForMySql), + string(ProjectTargetPlatformAzureDbForPostgreSql), + string(ProjectTargetPlatformMongoDb), + string(ProjectTargetPlatformSQLDB), + string(ProjectTargetPlatformSQLMI), + string(ProjectTargetPlatformUnknown), + } +} + +func (s *ProjectTargetPlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProjectTargetPlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProjectTargetPlatform(input string) (*ProjectTargetPlatform, error) { + vals := map[string]ProjectTargetPlatform{ + "azuredbformysql": 
ProjectTargetPlatformAzureDbForMySql, + "azuredbforpostgresql": ProjectTargetPlatformAzureDbForPostgreSql, + "mongodb": ProjectTargetPlatformMongoDb, + "sqldb": ProjectTargetPlatformSQLDB, + "sqlmi": ProjectTargetPlatformSQLMI, + "unknown": ProjectTargetPlatformUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProjectTargetPlatform(input) + return &out, nil +} + +type ReplicateMigrationState string + +const ( + ReplicateMigrationStateACTIONREQUIRED ReplicateMigrationState = "ACTION_REQUIRED" + ReplicateMigrationStateCOMPLETE ReplicateMigrationState = "COMPLETE" + ReplicateMigrationStateFAILED ReplicateMigrationState = "FAILED" + ReplicateMigrationStatePENDING ReplicateMigrationState = "PENDING" + ReplicateMigrationStateUNDEFINED ReplicateMigrationState = "UNDEFINED" + ReplicateMigrationStateVALIDATING ReplicateMigrationState = "VALIDATING" +) + +func PossibleValuesForReplicateMigrationState() []string { + return []string{ + string(ReplicateMigrationStateACTIONREQUIRED), + string(ReplicateMigrationStateCOMPLETE), + string(ReplicateMigrationStateFAILED), + string(ReplicateMigrationStatePENDING), + string(ReplicateMigrationStateUNDEFINED), + string(ReplicateMigrationStateVALIDATING), + } +} + +func (s *ReplicateMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseReplicateMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicateMigrationState(input string) (*ReplicateMigrationState, error) { + vals := map[string]ReplicateMigrationState{ + "action_required": ReplicateMigrationStateACTIONREQUIRED, + "complete": ReplicateMigrationStateCOMPLETE, + "failed": ReplicateMigrationStateFAILED, + "pending": 
ReplicateMigrationStatePENDING, + "undefined": ReplicateMigrationStateUNDEFINED, + "validating": ReplicateMigrationStateVALIDATING, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicateMigrationState(input) + return &out, nil +} + +type ResultType string + +const ( + ResultTypeCollection ResultType = "Collection" + ResultTypeDatabase ResultType = "Database" + ResultTypeMigration ResultType = "Migration" +) + +func PossibleValuesForResultType() []string { + return []string{ + string(ResultTypeCollection), + string(ResultTypeDatabase), + string(ResultTypeMigration), + } +} + +func (s *ResultType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResultType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResultType(input string) (*ResultType, error) { + vals := map[string]ResultType{ + "collection": ResultTypeCollection, + "database": ResultTypeDatabase, + "migration": ResultTypeMigration, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResultType(input) + return &out, nil +} + +type ScenarioSource string + +const ( + ScenarioSourceAccess ScenarioSource = "Access" + ScenarioSourceDBTwo ScenarioSource = "DB2" + ScenarioSourceMongoDB ScenarioSource = "MongoDB" + ScenarioSourceMySQL ScenarioSource = "MySQL" + ScenarioSourceMySQLRDS ScenarioSource = "MySQLRDS" + ScenarioSourceOracle ScenarioSource = "Oracle" + ScenarioSourcePostgreSQL ScenarioSource = "PostgreSQL" + ScenarioSourcePostgreSQLRDS ScenarioSource = "PostgreSQLRDS" + ScenarioSourceSQL ScenarioSource = "SQL" + ScenarioSourceSQLRDS ScenarioSource = "SQLRDS" + ScenarioSourceSybase ScenarioSource = 
"Sybase" +) + +func PossibleValuesForScenarioSource() []string { + return []string{ + string(ScenarioSourceAccess), + string(ScenarioSourceDBTwo), + string(ScenarioSourceMongoDB), + string(ScenarioSourceMySQL), + string(ScenarioSourceMySQLRDS), + string(ScenarioSourceOracle), + string(ScenarioSourcePostgreSQL), + string(ScenarioSourcePostgreSQLRDS), + string(ScenarioSourceSQL), + string(ScenarioSourceSQLRDS), + string(ScenarioSourceSybase), + } +} + +func (s *ScenarioSource) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioSource(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioSource(input string) (*ScenarioSource, error) { + vals := map[string]ScenarioSource{ + "access": ScenarioSourceAccess, + "db2": ScenarioSourceDBTwo, + "mongodb": ScenarioSourceMongoDB, + "mysql": ScenarioSourceMySQL, + "mysqlrds": ScenarioSourceMySQLRDS, + "oracle": ScenarioSourceOracle, + "postgresql": ScenarioSourcePostgreSQL, + "postgresqlrds": ScenarioSourcePostgreSQLRDS, + "sql": ScenarioSourceSQL, + "sqlrds": ScenarioSourceSQLRDS, + "sybase": ScenarioSourceSybase, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioSource(input) + return &out, nil +} + +type ScenarioTarget string + +const ( + ScenarioTargetAzureDBForMySql ScenarioTarget = "AzureDBForMySql" + ScenarioTargetAzureDBForPostgresSQL ScenarioTarget = "AzureDBForPostgresSQL" + ScenarioTargetMongoDB ScenarioTarget = "MongoDB" + ScenarioTargetSQLDB ScenarioTarget = "SQLDB" + ScenarioTargetSQLDW ScenarioTarget = "SQLDW" + ScenarioTargetSQLMI ScenarioTarget = "SQLMI" + ScenarioTargetSQLServer ScenarioTarget = "SQLServer" +) + +func PossibleValuesForScenarioTarget() []string { + return []string{ + 
string(ScenarioTargetAzureDBForMySql), + string(ScenarioTargetAzureDBForPostgresSQL), + string(ScenarioTargetMongoDB), + string(ScenarioTargetSQLDB), + string(ScenarioTargetSQLDW), + string(ScenarioTargetSQLMI), + string(ScenarioTargetSQLServer), + } +} + +func (s *ScenarioTarget) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioTarget(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioTarget(input string) (*ScenarioTarget, error) { + vals := map[string]ScenarioTarget{ + "azuredbformysql": ScenarioTargetAzureDBForMySql, + "azuredbforpostgressql": ScenarioTargetAzureDBForPostgresSQL, + "mongodb": ScenarioTargetMongoDB, + "sqldb": ScenarioTargetSQLDB, + "sqldw": ScenarioTargetSQLDW, + "sqlmi": ScenarioTargetSQLMI, + "sqlserver": ScenarioTargetSQLServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioTarget(input) + return &out, nil +} + +type ServerLevelPermissionsGroup string + +const ( + ServerLevelPermissionsGroupDefault ServerLevelPermissionsGroup = "Default" + ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL ServerLevelPermissionsGroup = "MigrationFromMySQLToAzureDBForMySQL" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureDB" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureMI" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureVM" +) + +func PossibleValuesForServerLevelPermissionsGroup() []string { + return []string{ + string(ServerLevelPermissionsGroupDefault), + 
string(ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM), + } +} + +func (s *ServerLevelPermissionsGroup) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServerLevelPermissionsGroup(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServerLevelPermissionsGroup(input string) (*ServerLevelPermissionsGroup, error) { + vals := map[string]ServerLevelPermissionsGroup{ + "default": ServerLevelPermissionsGroupDefault, + "migrationfrommysqltoazuredbformysql": ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL, + "migrationfromsqlservertoazuredb": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB, + "migrationfromsqlservertoazuremi": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI, + "migrationfromsqlservertoazurevm": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServerLevelPermissionsGroup(input) + return &out, nil +} + +type ServiceProvisioningState string + +const ( + ServiceProvisioningStateAccepted ServiceProvisioningState = "Accepted" + ServiceProvisioningStateDeleting ServiceProvisioningState = "Deleting" + ServiceProvisioningStateDeploying ServiceProvisioningState = "Deploying" + ServiceProvisioningStateFailed ServiceProvisioningState = "Failed" + ServiceProvisioningStateFailedToStart ServiceProvisioningState = "FailedToStart" + ServiceProvisioningStateFailedToStop ServiceProvisioningState = "FailedToStop" + ServiceProvisioningStateStarting 
ServiceProvisioningState = "Starting" + ServiceProvisioningStateStopped ServiceProvisioningState = "Stopped" + ServiceProvisioningStateStopping ServiceProvisioningState = "Stopping" + ServiceProvisioningStateSucceeded ServiceProvisioningState = "Succeeded" +) + +func PossibleValuesForServiceProvisioningState() []string { + return []string{ + string(ServiceProvisioningStateAccepted), + string(ServiceProvisioningStateDeleting), + string(ServiceProvisioningStateDeploying), + string(ServiceProvisioningStateFailed), + string(ServiceProvisioningStateFailedToStart), + string(ServiceProvisioningStateFailedToStop), + string(ServiceProvisioningStateStarting), + string(ServiceProvisioningStateStopped), + string(ServiceProvisioningStateStopping), + string(ServiceProvisioningStateSucceeded), + } +} + +func (s *ServiceProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServiceProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServiceProvisioningState(input string) (*ServiceProvisioningState, error) { + vals := map[string]ServiceProvisioningState{ + "accepted": ServiceProvisioningStateAccepted, + "deleting": ServiceProvisioningStateDeleting, + "deploying": ServiceProvisioningStateDeploying, + "failed": ServiceProvisioningStateFailed, + "failedtostart": ServiceProvisioningStateFailedToStart, + "failedtostop": ServiceProvisioningStateFailedToStop, + "starting": ServiceProvisioningStateStarting, + "stopped": ServiceProvisioningStateStopped, + "stopping": ServiceProvisioningStateStopping, + "succeeded": ServiceProvisioningStateSucceeded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServiceProvisioningState(input) + return &out, nil +} + 
+type Severity string + +const ( + SeverityError Severity = "Error" + SeverityMessage Severity = "Message" + SeverityWarning Severity = "Warning" +) + +func PossibleValuesForSeverity() []string { + return []string{ + string(SeverityError), + string(SeverityMessage), + string(SeverityWarning), + } +} + +func (s *Severity) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSeverity(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSeverity(input string) (*Severity, error) { + vals := map[string]Severity{ + "error": SeverityError, + "message": SeverityMessage, + "warning": SeverityWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Severity(input) + return &out, nil +} + +type SqlSourcePlatform string + +const ( + SqlSourcePlatformSqlOnPrem SqlSourcePlatform = "SqlOnPrem" +) + +func PossibleValuesForSqlSourcePlatform() []string { + return []string{ + string(SqlSourcePlatformSqlOnPrem), + } +} + +func (s *SqlSourcePlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSqlSourcePlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSqlSourcePlatform(input string) (*SqlSourcePlatform, error) { + vals := map[string]SqlSourcePlatform{ + "sqlonprem": SqlSourcePlatformSqlOnPrem, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SqlSourcePlatform(input) + return &out, nil +} + +type SsisMigrationOverwriteOption string + +const ( + 
SsisMigrationOverwriteOptionIgnore SsisMigrationOverwriteOption = "Ignore" + SsisMigrationOverwriteOptionOverwrite SsisMigrationOverwriteOption = "Overwrite" +) + +func PossibleValuesForSsisMigrationOverwriteOption() []string { + return []string{ + string(SsisMigrationOverwriteOptionIgnore), + string(SsisMigrationOverwriteOptionOverwrite), + } +} + +func (s *SsisMigrationOverwriteOption) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisMigrationOverwriteOption(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisMigrationOverwriteOption(input string) (*SsisMigrationOverwriteOption, error) { + vals := map[string]SsisMigrationOverwriteOption{ + "ignore": SsisMigrationOverwriteOptionIgnore, + "overwrite": SsisMigrationOverwriteOptionOverwrite, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisMigrationOverwriteOption(input) + return &out, nil +} + +type SsisMigrationStage string + +const ( + SsisMigrationStageCompleted SsisMigrationStage = "Completed" + SsisMigrationStageInProgress SsisMigrationStage = "InProgress" + SsisMigrationStageInitialize SsisMigrationStage = "Initialize" + SsisMigrationStageNone SsisMigrationStage = "None" +) + +func PossibleValuesForSsisMigrationStage() []string { + return []string{ + string(SsisMigrationStageCompleted), + string(SsisMigrationStageInProgress), + string(SsisMigrationStageInitialize), + string(SsisMigrationStageNone), + } +} + +func (s *SsisMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: 
%+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisMigrationStage(input string) (*SsisMigrationStage, error) { + vals := map[string]SsisMigrationStage{ + "completed": SsisMigrationStageCompleted, + "inprogress": SsisMigrationStageInProgress, + "initialize": SsisMigrationStageInitialize, + "none": SsisMigrationStageNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisMigrationStage(input) + return &out, nil +} + +type SsisStoreType string + +const ( + SsisStoreTypeSsisCatalog SsisStoreType = "SsisCatalog" +) + +func PossibleValuesForSsisStoreType() []string { + return []string{ + string(SsisStoreTypeSsisCatalog), + } +} + +func (s *SsisStoreType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisStoreType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisStoreType(input string) (*SsisStoreType, error) { + vals := map[string]SsisStoreType{ + "ssiscatalog": SsisStoreTypeSsisCatalog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisStoreType(input) + return &out, nil +} + +type SyncDatabaseMigrationReportingState string + +const ( + SyncDatabaseMigrationReportingStateBACKUPCOMPLETED SyncDatabaseMigrationReportingState = "BACKUP_COMPLETED" + SyncDatabaseMigrationReportingStateBACKUPINPROGRESS SyncDatabaseMigrationReportingState = "BACKUP_IN_PROGRESS" + SyncDatabaseMigrationReportingStateCANCELLED SyncDatabaseMigrationReportingState = "CANCELLED" + SyncDatabaseMigrationReportingStateCANCELLING SyncDatabaseMigrationReportingState = "CANCELLING" + SyncDatabaseMigrationReportingStateCOMPLETE SyncDatabaseMigrationReportingState = 
"COMPLETE" + SyncDatabaseMigrationReportingStateCOMPLETING SyncDatabaseMigrationReportingState = "COMPLETING" + SyncDatabaseMigrationReportingStateCONFIGURING SyncDatabaseMigrationReportingState = "CONFIGURING" + SyncDatabaseMigrationReportingStateFAILED SyncDatabaseMigrationReportingState = "FAILED" + SyncDatabaseMigrationReportingStateINITIALIAZING SyncDatabaseMigrationReportingState = "INITIALIAZING" + SyncDatabaseMigrationReportingStateREADYTOCOMPLETE SyncDatabaseMigrationReportingState = "READY_TO_COMPLETE" + SyncDatabaseMigrationReportingStateRESTORECOMPLETED SyncDatabaseMigrationReportingState = "RESTORE_COMPLETED" + SyncDatabaseMigrationReportingStateRESTOREINPROGRESS SyncDatabaseMigrationReportingState = "RESTORE_IN_PROGRESS" + SyncDatabaseMigrationReportingStateRUNNING SyncDatabaseMigrationReportingState = "RUNNING" + SyncDatabaseMigrationReportingStateSTARTING SyncDatabaseMigrationReportingState = "STARTING" + SyncDatabaseMigrationReportingStateUNDEFINED SyncDatabaseMigrationReportingState = "UNDEFINED" + SyncDatabaseMigrationReportingStateVALIDATING SyncDatabaseMigrationReportingState = "VALIDATING" + SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE SyncDatabaseMigrationReportingState = "VALIDATION_COMPLETE" + SyncDatabaseMigrationReportingStateVALIDATIONFAILED SyncDatabaseMigrationReportingState = "VALIDATION_FAILED" +) + +func PossibleValuesForSyncDatabaseMigrationReportingState() []string { + return []string{ + string(SyncDatabaseMigrationReportingStateBACKUPCOMPLETED), + string(SyncDatabaseMigrationReportingStateBACKUPINPROGRESS), + string(SyncDatabaseMigrationReportingStateCANCELLED), + string(SyncDatabaseMigrationReportingStateCANCELLING), + string(SyncDatabaseMigrationReportingStateCOMPLETE), + string(SyncDatabaseMigrationReportingStateCOMPLETING), + string(SyncDatabaseMigrationReportingStateCONFIGURING), + string(SyncDatabaseMigrationReportingStateFAILED), + string(SyncDatabaseMigrationReportingStateINITIALIAZING), + 
string(SyncDatabaseMigrationReportingStateREADYTOCOMPLETE), + string(SyncDatabaseMigrationReportingStateRESTORECOMPLETED), + string(SyncDatabaseMigrationReportingStateRESTOREINPROGRESS), + string(SyncDatabaseMigrationReportingStateRUNNING), + string(SyncDatabaseMigrationReportingStateSTARTING), + string(SyncDatabaseMigrationReportingStateUNDEFINED), + string(SyncDatabaseMigrationReportingStateVALIDATING), + string(SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE), + string(SyncDatabaseMigrationReportingStateVALIDATIONFAILED), + } +} + +func (s *SyncDatabaseMigrationReportingState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSyncDatabaseMigrationReportingState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSyncDatabaseMigrationReportingState(input string) (*SyncDatabaseMigrationReportingState, error) { + vals := map[string]SyncDatabaseMigrationReportingState{ + "backup_completed": SyncDatabaseMigrationReportingStateBACKUPCOMPLETED, + "backup_in_progress": SyncDatabaseMigrationReportingStateBACKUPINPROGRESS, + "cancelled": SyncDatabaseMigrationReportingStateCANCELLED, + "cancelling": SyncDatabaseMigrationReportingStateCANCELLING, + "complete": SyncDatabaseMigrationReportingStateCOMPLETE, + "completing": SyncDatabaseMigrationReportingStateCOMPLETING, + "configuring": SyncDatabaseMigrationReportingStateCONFIGURING, + "failed": SyncDatabaseMigrationReportingStateFAILED, + "initialiazing": SyncDatabaseMigrationReportingStateINITIALIAZING, + "ready_to_complete": SyncDatabaseMigrationReportingStateREADYTOCOMPLETE, + "restore_completed": SyncDatabaseMigrationReportingStateRESTORECOMPLETED, + "restore_in_progress": SyncDatabaseMigrationReportingStateRESTOREINPROGRESS, + "running": SyncDatabaseMigrationReportingStateRUNNING, + "starting": 
SyncDatabaseMigrationReportingStateSTARTING, + "undefined": SyncDatabaseMigrationReportingStateUNDEFINED, + "validating": SyncDatabaseMigrationReportingStateVALIDATING, + "validation_complete": SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE, + "validation_failed": SyncDatabaseMigrationReportingStateVALIDATIONFAILED, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SyncDatabaseMigrationReportingState(input) + return &out, nil +} + +type SyncTableMigrationState string + +const ( + SyncTableMigrationStateBEFORELOAD SyncTableMigrationState = "BEFORE_LOAD" + SyncTableMigrationStateCANCELED SyncTableMigrationState = "CANCELED" + SyncTableMigrationStateCOMPLETED SyncTableMigrationState = "COMPLETED" + SyncTableMigrationStateERROR SyncTableMigrationState = "ERROR" + SyncTableMigrationStateFAILED SyncTableMigrationState = "FAILED" + SyncTableMigrationStateFULLLOAD SyncTableMigrationState = "FULL_LOAD" +) + +func PossibleValuesForSyncTableMigrationState() []string { + return []string{ + string(SyncTableMigrationStateBEFORELOAD), + string(SyncTableMigrationStateCANCELED), + string(SyncTableMigrationStateCOMPLETED), + string(SyncTableMigrationStateERROR), + string(SyncTableMigrationStateFAILED), + string(SyncTableMigrationStateFULLLOAD), + } +} + +func (s *SyncTableMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSyncTableMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSyncTableMigrationState(input string) (*SyncTableMigrationState, error) { + vals := map[string]SyncTableMigrationState{ + "before_load": SyncTableMigrationStateBEFORELOAD, + "canceled": SyncTableMigrationStateCANCELED, + "completed": SyncTableMigrationStateCOMPLETED, 
+ "error": SyncTableMigrationStateERROR, + "failed": SyncTableMigrationStateFAILED, + "full_load": SyncTableMigrationStateFULLLOAD, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SyncTableMigrationState(input) + return &out, nil +} + +type TaskState string + +const ( + TaskStateCanceled TaskState = "Canceled" + TaskStateFailed TaskState = "Failed" + TaskStateFailedInputValidation TaskState = "FailedInputValidation" + TaskStateFaulted TaskState = "Faulted" + TaskStateQueued TaskState = "Queued" + TaskStateRunning TaskState = "Running" + TaskStateSucceeded TaskState = "Succeeded" + TaskStateUnknown TaskState = "Unknown" +) + +func PossibleValuesForTaskState() []string { + return []string{ + string(TaskStateCanceled), + string(TaskStateFailed), + string(TaskStateFailedInputValidation), + string(TaskStateFaulted), + string(TaskStateQueued), + string(TaskStateRunning), + string(TaskStateSucceeded), + string(TaskStateUnknown), + } +} + +func (s *TaskState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTaskState(input string) (*TaskState, error) { + vals := map[string]TaskState{ + "canceled": TaskStateCanceled, + "failed": TaskStateFailed, + "failedinputvalidation": TaskStateFailedInputValidation, + "faulted": TaskStateFaulted, + "queued": TaskStateQueued, + "running": TaskStateRunning, + "succeeded": TaskStateSucceeded, + "unknown": TaskStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskState(input) + return &out, nil +} + +type TaskType string + +const ( + TaskTypeConnectPointMongoDb 
TaskType = "Connect.MongoDb" + TaskTypeConnectToSourcePointMySql TaskType = "ConnectToSource.MySql" + TaskTypeConnectToSourcePointOraclePointSync TaskType = "ConnectToSource.Oracle.Sync" + TaskTypeConnectToSourcePointPostgreSqlPointSync TaskType = "ConnectToSource.PostgreSql.Sync" + TaskTypeConnectToSourcePointSqlServer TaskType = "ConnectToSource.SqlServer" + TaskTypeConnectToSourcePointSqlServerPointSync TaskType = "ConnectToSource.SqlServer.Sync" + TaskTypeConnectToTargetPointAzureDbForMySql TaskType = "ConnectToTarget.AzureDbForMySql" + TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointAzureSqlDbMI TaskType = "ConnectToTarget.AzureSqlDbMI" + TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS TaskType = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointSqlDb TaskType = "ConnectToTarget.SqlDb" + TaskTypeConnectToTargetPointSqlDbPointSync TaskType = "ConnectToTarget.SqlDb.Sync" + TaskTypeGetTDECertificatesPointSql TaskType = "GetTDECertificates.Sql" + TaskTypeGetUserTablesMySql TaskType = "GetUserTablesMySql" + TaskTypeGetUserTablesOracle TaskType = "GetUserTablesOracle" + TaskTypeGetUserTablesPointAzureSqlDbPointSync TaskType = "GetUserTables.AzureSqlDb.Sync" + TaskTypeGetUserTablesPointSql TaskType = "GetUserTables.Sql" + TaskTypeGetUserTablesPostgreSql TaskType = "GetUserTablesPostgreSql" + TaskTypeMigratePointMongoDb TaskType = "Migrate.MongoDb" + TaskTypeMigratePointMySqlPointAzureDbForMySql TaskType = "Migrate.MySql.AzureDbForMySql" + TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync TaskType = "Migrate.MySql.AzureDbForMySql.Sync" + TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync TaskType = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + 
TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo TaskType = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + TaskTypeMigratePointSqlServerPointAzureSqlDbMI TaskType = "Migrate.SqlServer.AzureSqlDbMI" + TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync TaskType = "Migrate.SqlServer.AzureSqlDb.Sync" + TaskTypeMigratePointSqlServerPointSqlDb TaskType = "Migrate.SqlServer.SqlDb" + TaskTypeMigratePointSsis TaskType = "Migrate.Ssis" + TaskTypeMigrateSchemaSqlServerSqlDb TaskType = "MigrateSchemaSqlServerSqlDb" + TaskTypeServicePointCheckPointOCI TaskType = "Service.Check.OCI" + TaskTypeServicePointInstallPointOCI TaskType = "Service.Install.OCI" + TaskTypeServicePointUploadPointOCI TaskType = "Service.Upload.OCI" + TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync TaskType = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + TaskTypeValidatePointMongoDb TaskType = "Validate.MongoDb" + TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync TaskType = "Validate.Oracle.AzureDbPostgreSql.Sync" +) + +func PossibleValuesForTaskType() []string { + return []string{ + string(TaskTypeConnectPointMongoDb), + string(TaskTypeConnectToSourcePointMySql), + string(TaskTypeConnectToSourcePointOraclePointSync), + string(TaskTypeConnectToSourcePointPostgreSqlPointSync), + string(TaskTypeConnectToSourcePointSqlServer), + string(TaskTypeConnectToSourcePointSqlServerPointSync), + string(TaskTypeConnectToTargetPointAzureDbForMySql), + string(TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointAzureSqlDbMI), + 
string(TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointSqlDb), + string(TaskTypeConnectToTargetPointSqlDbPointSync), + string(TaskTypeGetTDECertificatesPointSql), + string(TaskTypeGetUserTablesMySql), + string(TaskTypeGetUserTablesOracle), + string(TaskTypeGetUserTablesPointAzureSqlDbPointSync), + string(TaskTypeGetUserTablesPointSql), + string(TaskTypeGetUserTablesPostgreSql), + string(TaskTypeMigratePointMongoDb), + string(TaskTypeMigratePointMySqlPointAzureDbForMySql), + string(TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync), + string(TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMI), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync), + string(TaskTypeMigratePointSqlServerPointSqlDb), + string(TaskTypeMigratePointSsis), + string(TaskTypeMigrateSchemaSqlServerSqlDb), + string(TaskTypeServicePointCheckPointOCI), + string(TaskTypeServicePointInstallPointOCI), + string(TaskTypeServicePointUploadPointOCI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync), + string(TaskTypeValidatePointMongoDb), + string(TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync), + } +} + +func (s *TaskType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func 
parseTaskType(input string) (*TaskType, error) { + vals := map[string]TaskType{ + "connect.mongodb": TaskTypeConnectPointMongoDb, + "connecttosource.mysql": TaskTypeConnectToSourcePointMySql, + "connecttosource.oracle.sync": TaskTypeConnectToSourcePointOraclePointSync, + "connecttosource.postgresql.sync": TaskTypeConnectToSourcePointPostgreSqlPointSync, + "connecttosource.sqlserver": TaskTypeConnectToSourcePointSqlServer, + "connecttosource.sqlserver.sync": TaskTypeConnectToSourcePointSqlServerPointSync, + "connecttotarget.azuredbformysql": TaskTypeConnectToTargetPointAzureDbForMySql, + "connecttotarget.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync, + "connecttotarget.azuresqldbmi": TaskTypeConnectToTargetPointAzureSqlDbMI, + "connecttotarget.azuresqldbmi.sync.lrs": TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS, + "connecttotarget.oracle.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync, + "connecttotarget.sqldb": TaskTypeConnectToTargetPointSqlDb, + "connecttotarget.sqldb.sync": TaskTypeConnectToTargetPointSqlDbPointSync, + "gettdecertificates.sql": TaskTypeGetTDECertificatesPointSql, + "getusertablesmysql": TaskTypeGetUserTablesMySql, + "getusertablesoracle": TaskTypeGetUserTablesOracle, + "getusertables.azuresqldb.sync": TaskTypeGetUserTablesPointAzureSqlDbPointSync, + "getusertables.sql": TaskTypeGetUserTablesPointSql, + "getusertablespostgresql": TaskTypeGetUserTablesPostgreSql, + "migrate.mongodb": TaskTypeMigratePointMongoDb, + "migrate.mysql.azuredbformysql": TaskTypeMigratePointMySqlPointAzureDbForMySql, + "migrate.mysql.azuredbformysql.sync": TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync, + "migrate.oracle.azuredbforpostgresql.sync": TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync, + "migrate.postgresql.azuredbforpostgresql.syncv2": TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo, + "migrate.sqlserver.azuresqldbmi": 
TaskTypeMigratePointSqlServerPointAzureSqlDbMI, + "migrate.sqlserver.azuresqldbmi.sync.lrs": TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "migrate.sqlserver.azuresqldb.sync": TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync, + "migrate.sqlserver.sqldb": TaskTypeMigratePointSqlServerPointSqlDb, + "migrate.ssis": TaskTypeMigratePointSsis, + "migrateschemasqlserversqldb": TaskTypeMigrateSchemaSqlServerSqlDb, + "service.check.oci": TaskTypeServicePointCheckPointOCI, + "service.install.oci": TaskTypeServicePointInstallPointOCI, + "service.upload.oci": TaskTypeServicePointUploadPointOCI, + "validatemigrationinput.sqlserver.azuresqldbmi": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI, + "validatemigrationinput.sqlserver.azuresqldbmi.sync.lrs": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "validatemigrationinput.sqlserver.sqldb.sync": TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync, + "validate.mongodb": TaskTypeValidatePointMongoDb, + "validate.oracle.azuredbpostgresql.sync": TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskType(input) + return &out, nil +} + +type UpdateActionType string + +const ( + UpdateActionTypeAddedOnTarget UpdateActionType = "AddedOnTarget" + UpdateActionTypeChangedOnTarget UpdateActionType = "ChangedOnTarget" + UpdateActionTypeDeletedOnTarget UpdateActionType = "DeletedOnTarget" +) + +func PossibleValuesForUpdateActionType() []string { + return []string{ + string(UpdateActionTypeAddedOnTarget), + string(UpdateActionTypeChangedOnTarget), + string(UpdateActionTypeDeletedOnTarget), + } +} + +func (s *UpdateActionType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err 
:= parseUpdateActionType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseUpdateActionType(input string) (*UpdateActionType, error) { + vals := map[string]UpdateActionType{ + "addedontarget": UpdateActionTypeAddedOnTarget, + "changedontarget": UpdateActionTypeChangedOnTarget, + "deletedontarget": UpdateActionTypeDeletedOnTarget, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UpdateActionType(input) + return &out, nil +} + +type ValidationStatus string + +const ( + ValidationStatusCompleted ValidationStatus = "Completed" + ValidationStatusCompletedWithIssues ValidationStatus = "CompletedWithIssues" + ValidationStatusDefault ValidationStatus = "Default" + ValidationStatusFailed ValidationStatus = "Failed" + ValidationStatusInProgress ValidationStatus = "InProgress" + ValidationStatusInitialized ValidationStatus = "Initialized" + ValidationStatusNotStarted ValidationStatus = "NotStarted" + ValidationStatusStopped ValidationStatus = "Stopped" +) + +func PossibleValuesForValidationStatus() []string { + return []string{ + string(ValidationStatusCompleted), + string(ValidationStatusCompletedWithIssues), + string(ValidationStatusDefault), + string(ValidationStatusFailed), + string(ValidationStatusInProgress), + string(ValidationStatusInitialized), + string(ValidationStatusNotStarted), + string(ValidationStatusStopped), + } +} + +func (s *ValidationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseValidationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseValidationStatus(input string) (*ValidationStatus, error) { + vals := map[string]ValidationStatus{ + "completed": 
ValidationStatusCompleted, + "completedwithissues": ValidationStatusCompletedWithIssues, + "default": ValidationStatusDefault, + "failed": ValidationStatusFailed, + "inprogress": ValidationStatusInProgress, + "initialized": ValidationStatusInitialized, + "notstarted": ValidationStatusNotStarted, + "stopped": ValidationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ValidationStatus(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/id_file.go b/resource-manager/datamigration/2025-06-30/patch/id_file.go new file mode 100644 index 00000000000..a679d5d46c3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/id_file.go @@ -0,0 +1,148 @@ +package patch + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&FileId{}) +} + +var _ resourceids.ResourceId = &FileId{} + +// FileId is a struct representing the Resource ID for a File +type FileId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + FileName string +} + +// NewFileID returns a new FileId struct +func NewFileID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, fileName string) FileId { + return FileId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + FileName: fileName, + } +} + +// ParseFileID parses 'input' into a FileId +func ParseFileID(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseFileIDInsensitively parses 'input' case-insensitively into a FileId +// note: this method should only be used for API response data and not user input +func ParseFileIDInsensitively(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *FileId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if 
id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.FileName, ok = input.Parsed["fileName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "fileName", input) + } + + return nil +} + +// ValidateFileID checks that 'input' can be parsed as a File ID +func ValidateFileID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseFileID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted File ID +func (id FileId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/files/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.FileName) +} + +// Segments returns a slice of Resource ID Segments which comprise this File ID +func (id FileId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + 
resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticFiles", "files", "files"), + resourceids.UserSpecifiedSegment("fileName", "fileName"), + } +} + +// String returns a human-readable description of this File ID +func (id FileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("File Name: %q", id.FileName), + } + return fmt.Sprintf("File (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/patch/id_file_test.go b/resource-manager/datamigration/2025-06-30/patch/id_file_test.go new file mode 100644 index 00000000000..675641dd7e0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/id_file_test.go @@ -0,0 +1,372 @@ +package patch + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &FileId{} + +func TestNewFileID(t *testing.T) { + id := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.FileName != "fileName" { + t.Fatalf("Expected %q but got %q for Segment 'FileName'", id.FileName, "fileName") + } +} + +func TestFormatFileID(t *testing.T) { + actual := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseFileID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestParseFileIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + FileName: "fIlEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.FileName != v.Expected.FileName {
+			t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName)
+		}
+
+	}
+}
+
+func TestSegmentsForFileId(t *testing.T) {
+	segments := FileId{}.Segments()
+	if len(segments) == 0 {
+		t.Fatalf("FileId has no segments")
+	}
+
+	uniqueNames := make(map[string]struct{}, 0)
+	for _, segment := range segments {
+		uniqueNames[segment.Name] = struct{}{}
+	}
+	if len(uniqueNames) != len(segments) {
+		t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments))
+	}
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/id_project.go b/resource-manager/datamigration/2025-06-30/patch/id_project.go
new file mode 100644
index 00000000000..4455c74fa01
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/id_project.go
@@ -0,0 +1,139 @@
+package patch
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/go-azure-helpers/resourcemanager/recaser"
+	"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +func init() { + recaser.RegisterResourceId(&ProjectId{}) +} + +var _ resourceids.ResourceId = &ProjectId{} + +// ProjectId is a struct representing the Resource ID for a Project +type ProjectId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string +} + +// NewProjectID returns a new ProjectId struct +func NewProjectID(subscriptionId string, resourceGroupName string, serviceName string, projectName string) ProjectId { + return ProjectId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + } +} + +// ParseProjectID parses 'input' into a ProjectId +func ParseProjectID(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseProjectIDInsensitively parses 'input' case-insensitively into a ProjectId +// note: this method should only be used for API response data and not user input +func ParseProjectIDInsensitively(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ProjectId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + 
if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + return nil +} + +// ValidateProjectID checks that 'input' can be parsed as a Project ID +func ValidateProjectID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseProjectID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Project ID +func (id ProjectId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Project ID +func (id ProjectId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + } +} + +// String 
returns a human-readable description of this Project ID +func (id ProjectId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + } + return fmt.Sprintf("Project (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/patch/id_project_test.go b/resource-manager/datamigration/2025-06-30/patch/id_project_test.go new file mode 100644 index 00000000000..4241cec2b9a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/id_project_test.go @@ -0,0 +1,327 @@ +package patch + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &ProjectId{} + +func TestNewProjectID(t *testing.T) { + id := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } +} + +func TestFormatProjectID(t *testing.T) { + actual := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", 
"projectName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseProjectID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestParseProjectIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + 
Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/extra",
+			Error: true,
+		},
+	}
+	for _, v := range testData {
+		t.Logf("[DEBUG] Testing %q", v.Input)
+
+		actual, err := ParseProjectIDInsensitively(v.Input)
+		if err != nil {
+			if v.Error {
+				continue
+			}
+
+			t.Fatalf("Expect a value but got an error: %+v", err)
+		}
+		if v.Error {
+			t.Fatal("Expect an error but didn't get one")
+		}
+
+		if actual.SubscriptionId != v.Expected.SubscriptionId {
+			t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId)
+		}
+
+		if actual.ResourceGroupName != v.Expected.ResourceGroupName {
+			t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName)
+		}
+
+		if actual.ServiceName != v.Expected.ServiceName {
+			t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName)
+		}
+
+		if actual.ProjectName != v.Expected.ProjectName {
+			t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName)
+		}
+
+	}
+}
+
+func TestSegmentsForProjectId(t *testing.T) {
+	segments := ProjectId{}.Segments()
+	if len(segments) == 0 {
+		t.Fatalf("ProjectId has no segments")
+	}
+
+	uniqueNames := make(map[string]struct{}, 0)
+	for _, segment := range segments {
+		uniqueNames[segment.Name] = struct{}{}
+	}
+	if len(uniqueNames) != len(segments) {
+		t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments))
+	}
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/id_service.go b/resource-manager/datamigration/2025-06-30/patch/id_service.go
new file mode 100644
index 00000000000..66242c253dc
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/id_service.go
@@ -0,0 +1,130 @@
+package patch
+
+import (
+	"fmt"
+	"strings"
+
+	
"github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&ServiceId{}) +} + +var _ resourceids.ResourceId = &ServiceId{} + +// ServiceId is a struct representing the Resource ID for a Service +type ServiceId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string +} + +// NewServiceID returns a new ServiceId struct +func NewServiceID(subscriptionId string, resourceGroupName string, serviceName string) ServiceId { + return ServiceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + } +} + +// ParseServiceID parses 'input' into a ServiceId +func ParseServiceID(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceIDInsensitively parses 'input' case-insensitively into a ServiceId +// note: this method should only be used for API response data and not user input +func ParseServiceIDInsensitively(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + return nil +} + +// ValidateServiceID checks that 'input' can be parsed as a Service ID +func ValidateServiceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service ID +func (id ServiceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service ID +func (id ServiceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + } +} + +// String returns a human-readable description of this Service ID +func (id 
ServiceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + } + return fmt.Sprintf("Service (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/patch/id_service_test.go b/resource-manager/datamigration/2025-06-30/patch/id_service_test.go new file mode 100644 index 00000000000..3f6a5707ccc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/id_service_test.go @@ -0,0 +1,282 @@ +package patch + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &ServiceId{} + +func TestNewServiceID(t *testing.T) { + id := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } +} + +func TestFormatServiceID(t *testing.T) { + actual := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func 
TestParseServiceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if 
actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestParseServiceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/extra",
+			Error: true,
+		},
+	}
+	for _, v := range testData {
+		t.Logf("[DEBUG] Testing %q", v.Input)
+
+		actual, err := ParseServiceIDInsensitively(v.Input)
+		if err != nil {
+			if v.Error {
+				continue
+			}
+
+			t.Fatalf("Expect a value but got an error: %+v", err)
+		}
+		if v.Error {
+			t.Fatal("Expect an error but didn't get one")
+		}
+
+		if actual.SubscriptionId != v.Expected.SubscriptionId {
+			t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId)
+		}
+
+		if actual.ResourceGroupName != v.Expected.ResourceGroupName {
+			t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName)
+		}
+
+		if actual.ServiceName != v.Expected.ServiceName {
+			t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName)
+		}
+
+	}
+}
+
+func TestSegmentsForServiceId(t *testing.T) {
+	segments := ServiceId{}.Segments()
+	if len(segments) == 0 {
+		t.Fatalf("ServiceId has no segments")
+	}
+
+	uniqueNames := make(map[string]struct{}, 0)
+	for _, segment := range segments {
+		uniqueNames[segment.Name] = struct{}{}
+	}
+	if len(uniqueNames) != len(segments) {
+		t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments))
+	}
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/id_servicetask.go b/resource-manager/datamigration/2025-06-30/patch/id_servicetask.go
new file mode 100644
index 00000000000..67c3bfd19a2
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/id_servicetask.go
@@ -0,0 +1,139 @@
+package patch
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/go-azure-helpers/resourcemanager/recaser"
+	"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids"
+)
+
+// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&ServiceTaskId{}) +} + +var _ resourceids.ResourceId = &ServiceTaskId{} + +// ServiceTaskId is a struct representing the Resource ID for a Service Task +type ServiceTaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ServiceTaskName string +} + +// NewServiceTaskID returns a new ServiceTaskId struct +func NewServiceTaskID(subscriptionId string, resourceGroupName string, serviceName string, serviceTaskName string) ServiceTaskId { + return ServiceTaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ServiceTaskName: serviceTaskName, + } +} + +// ParseServiceTaskID parses 'input' into a ServiceTaskId +func ParseServiceTaskID(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceTaskIDInsensitively parses 'input' case-insensitively into a ServiceTaskId +// note: this method should only be used for API response data and not user input +func ParseServiceTaskIDInsensitively(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceTaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ServiceTaskName, ok = input.Parsed["serviceTaskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceTaskName", input) + } + + return nil +} + +// ValidateServiceTaskID checks that 'input' can be parsed as a Service Task ID +func ValidateServiceTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service Task ID +func (id ServiceTaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/serviceTasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ServiceTaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service Task ID +func (id ServiceTaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + 
resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticServiceTasks", "serviceTasks", "serviceTasks"), + resourceids.UserSpecifiedSegment("serviceTaskName", "serviceTaskName"), + } +} + +// String returns a human-readable description of this Service Task ID +func (id ServiceTaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Service Task Name: %q", id.ServiceTaskName), + } + return fmt.Sprintf("Service Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/patch/id_servicetask_test.go b/resource-manager/datamigration/2025-06-30/patch/id_servicetask_test.go new file mode 100644 index 00000000000..963e198d9cd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/id_servicetask_test.go @@ -0,0 +1,327 @@ +package patch + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &ServiceTaskId{} + +func TestNewServiceTaskID(t *testing.T) { + id := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ServiceTaskName != "serviceTaskName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceTaskName'", id.ServiceTaskName, "serviceTaskName") + } +} + +func TestFormatServiceTaskID(t *testing.T) { + actual := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseServiceTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // 
Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", 
v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestParseServiceTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI 
(mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ServiceTaskName: "sErViCeTaSkNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", 
v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestSegmentsForServiceTaskId(t *testing.T) { + segments := ServiceTaskId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceTaskId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/patch/id_task.go b/resource-manager/datamigration/2025-06-30/patch/id_task.go new file mode 100644 index 00000000000..a81568fe08b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/id_task.go @@ -0,0 +1,148 @@ +package patch + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&TaskId{}) +} + +var _ resourceids.ResourceId = &TaskId{} + +// TaskId is a struct representing the Resource ID for a Task +type TaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + TaskName string +} + +// NewTaskID returns a new TaskId struct +func NewTaskID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, taskName string) TaskId { + return TaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + TaskName: taskName, + } +} + +// ParseTaskID parses 'input' into a TaskId +func ParseTaskID(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseTaskIDInsensitively parses 'input' case-insensitively into a TaskId +// note: this method should only be used for API response data and not user input +func ParseTaskIDInsensitively(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *TaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if 
id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.TaskName, ok = input.Parsed["taskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "taskName", input) + } + + return nil +} + +// ValidateTaskID checks that 'input' can be parsed as a Task ID +func ValidateTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Task ID +func (id TaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/tasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.TaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Task ID +func (id TaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + 
resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticTasks", "tasks", "tasks"), + resourceids.UserSpecifiedSegment("taskName", "taskName"), + } +} + +// String returns a human-readable description of this Task ID +func (id TaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("Task Name: %q", id.TaskName), + } + return fmt.Sprintf("Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/patch/id_task_test.go b/resource-manager/datamigration/2025-06-30/patch/id_task_test.go new file mode 100644 index 00000000000..61fd69b670e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/id_task_test.go @@ -0,0 +1,372 @@ +package patch + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &TaskId{} + +func TestNewTaskID(t *testing.T) { + id := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.TaskName != "taskName" { + t.Fatalf("Expected %q but got %q for Segment 'TaskName'", id.TaskName, "taskName") + } +} + +func TestFormatTaskID(t *testing.T) { + actual := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.TaskName != v.Expected.TaskName { + t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName) + } + + } +} + +func TestParseTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + TaskName: "tAsKnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.TaskName != v.Expected.TaskName { + t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName) + } + + } +} + +func TestSegmentsForTaskId(t *testing.T) { + segments := TaskId{}.Segments() + if len(segments) == 0 { + t.Fatalf("TaskId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/patch/method_filesupdate.go b/resource-manager/datamigration/2025-06-30/patch/method_filesupdate.go new file mode 100644 index 00000000000..52ee4c5b69e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/method_filesupdate.go @@ -0,0 +1,57 @@ +package patch + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectFile +} + +// FilesUpdate ... 
+func (c PATCHClient) FilesUpdate(ctx context.Context, id FileId, input ProjectFile) (result FilesUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectFile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/patch/method_projectsupdate.go b/resource-manager/datamigration/2025-06-30/patch/method_projectsupdate.go new file mode 100644 index 00000000000..9fb392d7502 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/method_projectsupdate.go @@ -0,0 +1,57 @@ +package patch + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectsUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *Project +} + +// ProjectsUpdate ... 
+func (c PATCHClient) ProjectsUpdate(ctx context.Context, id ProjectId, input Project) (result ProjectsUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model Project + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/patch/method_servicesupdate.go b/resource-manager/datamigration/2025-06-30/patch/method_servicesupdate.go new file mode 100644 index 00000000000..303cedf3999 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/method_servicesupdate.go @@ -0,0 +1,75 @@ +package patch + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DataMigrationService +} + +// ServicesUpdate ... 
+func (c PATCHClient) ServicesUpdate(ctx context.Context, id ServiceId, input DataMigrationService) (result ServicesUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesUpdateThenPoll performs ServicesUpdate then polls until it's completed +func (c PATCHClient) ServicesUpdateThenPoll(ctx context.Context, id ServiceId, input DataMigrationService) error { + result, err := c.ServicesUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing ServicesUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/method_servicetasksupdate.go b/resource-manager/datamigration/2025-06-30/patch/method_servicetasksupdate.go new file mode 100644 index 00000000000..ac44c7ae58e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/method_servicetasksupdate.go @@ -0,0 +1,57 @@ +package patch + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServiceTasksUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// ServiceTasksUpdate ... +func (c PATCHClient) ServiceTasksUpdate(ctx context.Context, id ServiceTaskId, input ProjectTask) (result ServiceTasksUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/patch/method_tasksupdate.go b/resource-manager/datamigration/2025-06-30/patch/method_tasksupdate.go new file mode 100644 index 00000000000..6701840200d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/method_tasksupdate.go @@ -0,0 +1,57 @@ +package patch + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// TasksUpdate ... 
+func (c PATCHClient) TasksUpdate(ctx context.Context, id TaskId, input ProjectTask) (result TasksUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_azureactivedirectoryapp.go b/resource-manager/datamigration/2025-06-30/patch/model_azureactivedirectoryapp.go new file mode 100644 index 00000000000..7c81e655cc4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_azureactivedirectoryapp.go @@ -0,0 +1,11 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AzureActiveDirectoryApp struct { + AppKey *string `json:"appKey,omitempty"` + ApplicationId *string `json:"applicationId,omitempty"` + IgnoreAzurePermissions *bool `json:"ignoreAzurePermissions,omitempty"` + TenantId *string `json:"tenantId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_backupfileinfo.go b/resource-manager/datamigration/2025-06-30/patch/model_backupfileinfo.go new file mode 100644 index 00000000000..f5bf749f505 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_backupfileinfo.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type BackupFileInfo struct { + FamilySequenceNumber *int64 `json:"familySequenceNumber,omitempty"` + FileLocation *string `json:"fileLocation,omitempty"` + Status *BackupFileStatus `json:"status,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_backupsetinfo.go b/resource-manager/datamigration/2025-06-30/patch/model_backupsetinfo.go new file mode 100644 index 00000000000..6655253565c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_backupsetinfo.go @@ -0,0 +1,59 @@ +package patch + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupSetInfo struct { + BackupFinishedDate *string `json:"backupFinishedDate,omitempty"` + BackupSetId *string `json:"backupSetId,omitempty"` + BackupStartDate *string `json:"backupStartDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FirstLsn *string `json:"firstLsn,omitempty"` + IsBackupRestored *bool `json:"isBackupRestored,omitempty"` + LastLsn *string `json:"lastLsn,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + ListOfBackupFiles *[]BackupFileInfo `json:"listOfBackupFiles,omitempty"` +} + +func (o *BackupSetInfo) GetBackupFinishedDateAsTime() (*time.Time, error) { + if o.BackupFinishedDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishedDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupFinishedDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishedDate = &formatted +} + +func (o *BackupSetInfo) GetBackupStartDateAsTime() (*time.Time, error) { + if o.BackupStartDate == nil { + return nil, nil + } + return 
dates.ParseAsFormat(o.BackupStartDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupStartDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupStartDate = &formatted +} + +func (o *BackupSetInfo) GetLastModifiedTimeAsTime() (*time.Time, error) { + if o.LastModifiedTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastModifiedTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetLastModifiedTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModifiedTime = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_blobshare.go b/resource-manager/datamigration/2025-06-30/patch/model_blobshare.go new file mode 100644 index 00000000000..aee7ee255a5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_blobshare.go @@ -0,0 +1,8 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BlobShare struct { + SasUri *string `json:"sasUri,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_commandproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_commandproperties.go new file mode 100644 index 00000000000..6eb0ac5460b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_commandproperties.go @@ -0,0 +1,85 @@ +package patch + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CommandProperties interface { + CommandProperties() BaseCommandPropertiesImpl +} + +var _ CommandProperties = BaseCommandPropertiesImpl{} + +type BaseCommandPropertiesImpl struct { + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s BaseCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s +} + +var _ CommandProperties = RawCommandPropertiesImpl{} + +// RawCommandPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawCommandPropertiesImpl struct { + commandProperties BaseCommandPropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s.commandProperties +} + +func UnmarshalCommandPropertiesImplementation(input []byte) (CommandProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling CommandProperties into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["commandType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureDbSqlMi.Complete") { + var out MigrateMISyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMISyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Sync.Complete.Database") { + var out MigrateSyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + var parent BaseCommandPropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseCommandPropertiesImpl: %+v", err) + } + + return RawCommandPropertiesImpl{ + commandProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connectioninfo.go b/resource-manager/datamigration/2025-06-30/patch/model_connectioninfo.go new file mode 100644 index 00000000000..d4e2552112f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connectioninfo.go @@ -0,0 +1,117 @@ +package patch + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectionInfo interface { + ConnectionInfo() BaseConnectionInfoImpl +} + +var _ ConnectionInfo = BaseConnectionInfoImpl{} + +type BaseConnectionInfoImpl struct { + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s BaseConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s +} + +var _ ConnectionInfo = RawConnectionInfoImpl{} + +// RawConnectionInfoImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawConnectionInfoImpl struct { + connectionInfo BaseConnectionInfoImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s.connectionInfo +} + +func UnmarshalConnectionInfoImplementation(input []byte) (ConnectionInfo, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectionInfo into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["type"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MiSqlConnectionInfo") { + var out MiSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MiSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MongoDbConnectionInfo") { + var out MongoDbConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MySqlConnectionInfo") { + var out MySqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MySqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "OracleConnectionInfo") { + var out OracleConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into OracleConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "PostgreSqlConnectionInfo") { + var out PostgreSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into PostgreSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SqlConnectionInfo") { + var out SqlConnectionInfo + if err := json.Unmarshal(input, &out); 
err != nil { + return nil, fmt.Errorf("unmarshaling into SqlConnectionInfo: %+v", err) + } + return out, nil + } + + var parent BaseConnectionInfoImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectionInfoImpl: %+v", err) + } + + return RawConnectionInfoImpl{ + connectionInfo: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttomongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttomongodbtaskproperties.go new file mode 100644 index 00000000000..23a4e5f9753 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttomongodbtaskproperties.go @@ -0,0 +1,106 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToMongoDbTaskProperties{} + +type ConnectToMongoDbTaskProperties struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToMongoDbTaskProperties{} + +func (s ConnectToMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Connect.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToMongoDbTaskProperties{} + +func (s *ConnectToMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToMongoDbTaskProperties': %+v", i, err) + } + output = 
append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcemysqltaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcemysqltaskinput.go new file mode 100644 index 00000000000..f4b59f832f4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcemysqltaskinput.go @@ -0,0 +1,11 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceMySqlTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetPlatform *MySqlTargetPlatformType `json:"targetPlatform,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcemysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcemysqltaskproperties.go new file mode 100644 index 00000000000..457b6320b5f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcemysqltaskproperties.go @@ -0,0 +1,106 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToSourceMySqlTaskProperties{} + +type ConnectToSourceMySqlTaskProperties struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceMySqlTaskProperties{} + +func (s ConnectToSourceMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.MySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceMySqlTaskProperties{} + +func (s *ConnectToSourceMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError 
`json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcenonsqltaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcenonsqltaskoutput.go new file mode 100644 index 00000000000..3b5962116c4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcenonsqltaskoutput.go @@ -0,0 +1,12 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceNonSqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerProperties *ServerProperties `json:"serverProperties,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourceoraclesynctaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourceoraclesynctaskinput.go new file mode 100644 index 00000000000..63b54e061e5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourceoraclesynctaskinput.go @@ -0,0 +1,8 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceOracleSyncTaskInput struct { + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourceoraclesynctaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourceoraclesynctaskoutput.go new file mode 100644 index 00000000000..1fec89576e0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourceoraclesynctaskoutput.go @@ -0,0 +1,11 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceOracleSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourceoraclesynctaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourceoraclesynctaskproperties.go new file mode 100644 index 00000000000..db3ed18e14c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourceoraclesynctaskproperties.go @@ -0,0 +1,106 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceOracleSyncTaskProperties{} + +type ConnectToSourceOracleSyncTaskProperties struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceOracleSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceOracleSyncTaskProperties{} + +func (s ConnectToSourceOracleSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper 
ConnectToSourceOracleSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.Oracle.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceOracleSyncTaskProperties{} + +func (s *ConnectToSourceOracleSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := 
UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceOracleSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcepostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcepostgresqlsynctaskinput.go new file mode 100644 index 00000000000..9c7d12150db --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcepostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourcePostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcepostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcepostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..df74ab62822 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcepostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourcePostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcepostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcepostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..5a38a0141ef --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcepostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourcePostgreSqlSyncTaskProperties{} + +type ConnectToSourcePostgreSqlSyncTaskProperties struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) 
MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourcePostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.PostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s *ConnectToSourcePostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := 
make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourcePostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlserversynctaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlserversynctaskproperties.go new file mode 100644 index 00000000000..eec21a0d123 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlserversynctaskproperties.go @@ -0,0 +1,121 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerSyncTaskProperties{} + +type ConnectToSourceSqlServerSyncTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerSyncTaskProperties{} + +func (s ConnectToSourceSqlServerSyncTaskProperties) MarshalJSON() ([]byte, error) { + type 
wrapper ConnectToSourceSqlServerSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.SqlServer.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerSyncTaskProperties{} + +func (s *ConnectToSourceSqlServerSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return 
fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskinput.go new file mode 100644 index 00000000000..b700bf7d382 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskinput.go @@ -0,0 +1,15 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceSqlServerTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectDatabases *bool `json:"collectDatabases,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + CollectTdeCertificateInfo *bool `json:"collectTdeCertificateInfo,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutput.go new file mode 100644 index 00000000000..61ba507d45b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutput.go @@ -0,0 +1,100 @@ +package patch + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceSqlServerTaskOutput interface { + ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl +} + +var _ ConnectToSourceSqlServerTaskOutput = BaseConnectToSourceSqlServerTaskOutputImpl{} + +type BaseConnectToSourceSqlServerTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s +} + +var _ ConnectToSourceSqlServerTaskOutput = RawConnectToSourceSqlServerTaskOutputImpl{} + +// RawConnectToSourceSqlServerTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectToSourceSqlServerTaskOutputImpl struct { + connectToSourceSqlServerTaskOutput BaseConnectToSourceSqlServerTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s.connectToSourceSqlServerTaskOutput +} + +func UnmarshalConnectToSourceSqlServerTaskOutputImplementation(input []byte) (ConnectToSourceSqlServerTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TaskLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputTaskLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + return out, nil + } + + var parent BaseConnectToSourceSqlServerTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectToSourceSqlServerTaskOutputImpl: %+v", err) + } + + return RawConnectToSourceSqlServerTaskOutputImpl{ + connectToSourceSqlServerTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutputagentjoblevel.go new file mode 100644 index 00000000000..b8a8ec58de1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +type ConnectToSourceSqlServerTaskOutputAgentJobLevel struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + JobCategory *string `json:"jobCategory,omitempty"` + JobOwner *string `json:"jobOwner,omitempty"` + LastExecutedOn *string `json:"lastExecutedOn,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutputdatabaselevel.go new file mode 100644 index 00000000000..a91c7d2caea --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutputdatabaselevel.go @@ -0,0 +1,56 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +type ConnectToSourceSqlServerTaskOutputDatabaseLevel struct { + CompatibilityLevel *DatabaseCompatLevel `json:"compatibilityLevel,omitempty"` + DatabaseFiles *[]DatabaseFileInfo `json:"databaseFiles,omitempty"` + DatabaseState *DatabaseState `json:"databaseState,omitempty"` + Name *string `json:"name,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + 
decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutputloginlevel.go new file mode 100644 index 00000000000..fca3fd1cf80 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutputloginlevel.go @@ -0,0 +1,56 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +type ConnectToSourceSqlServerTaskOutputLoginLevel struct { + DefaultDatabase *string `json:"defaultDatabase,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + LoginType *LoginType `json:"loginType,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputLoginLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + 
return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutputtasklevel.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutputtasklevel.go new file mode 100644 index 00000000000..1ea849d43c4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskoutputtasklevel.go @@ -0,0 +1,58 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +type ConnectToSourceSqlServerTaskOutputTaskLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + DatabaseTdeCertificateMapping *map[string]string `json:"databaseTdeCertificateMapping,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputTaskLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + decoded["resultType"] = "TaskLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskproperties.go 
b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskproperties.go new file mode 100644 index 00000000000..9c4ddf4a1b5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttosourcesqlservertaskproperties.go @@ -0,0 +1,124 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerTaskProperties{} + +type ConnectToSourceSqlServerTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskProperties{} + +func (s ConnectToSourceSqlServerTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + decoded["taskType"] = 
"ConnectToSource.SqlServer" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerTaskProperties{} + +func (s *ConnectToSourceSqlServerTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := 
make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbformysqltaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbformysqltaskinput.go new file mode 100644 index 00000000000..352800cb5ce --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbformysqltaskinput.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForMySqlTaskInput struct { + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbformysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbformysqltaskoutput.go new file mode 100644 index 00000000000..82e572b1c98 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbformysqltaskoutput.go @@ -0,0 +1,12 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForMySqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbformysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbformysqltaskproperties.go new file mode 100644 index 00000000000..4e84e41e8eb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbformysqltaskproperties.go @@ -0,0 +1,106 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetAzureDbForMySqlTaskProperties{} + +type ConnectToTargetAzureDbForMySqlTaskProperties struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) 
MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s *ConnectToTargetAzureDbForMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := 
make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..a5d29cc24fe --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..0ea128c57be --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..15bdd31e16a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..5b96f7c8851 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ +package patch + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput struct { + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..621f3e3f2f7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput struct { + DatabaseSchemaMap *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined `json:"databaseSchemaMap,omitempty"` + Databases *[]string `json:"databases,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go new file mode 100644 index 00000000000..8e6b56931d2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined struct { + Database *string `json:"database,omitempty"` + Schemas *[]string `json:"schemas,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..37fcb3d3a1e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = 
ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling 
ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqldbtaskinput.go new file mode 100644 index 00000000000..86dbb9ed0ef --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqldbtaskinput.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlDbTaskInput struct { + QueryObjectCounts *bool `json:"queryObjectCounts,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqldbtaskoutput.go new file mode 100644 index 00000000000..73a20b4b216 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqldbtaskoutput.go @@ -0,0 +1,11 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlDbTaskOutput struct { + Databases *map[string]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqldbtaskproperties.go new file mode 100644 index 00000000000..7046c2c68ef --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqldbtaskproperties.go @@ -0,0 +1,109 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlDbTaskProperties{} + +type ConnectToTargetSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlDbTaskProperties{} + +func (s ConnectToTargetSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlDbTaskProperties + wrapped := wrapper(s) + encoded, err 
:= json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlDbTaskProperties{} + +func (s *ConnectToTargetSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { 
+ return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmisynctaskinput.go new file mode 100644 index 00000000000..935c8eca669 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmisynctaskinput.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmisynctaskoutput.go new file mode 100644 index 00000000000..47c76961a21 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlMISyncTaskOutput struct { + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmisynctaskproperties.go new file mode 100644 index 00000000000..d13e20a94fc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmisynctaskproperties.go @@ -0,0 +1,106 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlMISyncTaskProperties{} + +type ConnectToTargetSqlMISyncTaskProperties struct { + Input *ConnectToTargetSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlMISyncTaskProperties{} + +func (s ConnectToTargetSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlMISyncTaskProperties{} + +func (s *ConnectToTargetSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling 
index %d field 'Commands' for 'ConnectToTargetSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmitaskinput.go new file mode 100644 index 00000000000..5abee038046 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmitaskinput.go @@ -0,0 +1,11 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlMITaskInput struct { + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmitaskoutput.go new file mode 100644 index 00000000000..8c5b394ce73 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmitaskoutput.go @@ -0,0 +1,13 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlMITaskOutput struct { + AgentJobs *[]string `json:"agentJobs,omitempty"` + Id *string `json:"id,omitempty"` + Logins *[]string `json:"logins,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmitaskproperties.go new file mode 100644 index 00000000000..248e2ab5167 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlmitaskproperties.go @@ -0,0 +1,106 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetSqlMITaskProperties{} + +type ConnectToTargetSqlMITaskProperties struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlMITaskProperties{} + +func (s ConnectToTargetSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlMITaskProperties{} + +func (s *ConnectToTargetSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlsqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlsqldbsynctaskinput.go new file mode 100644 index 00000000000..42dea35933b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlsqldbsynctaskinput.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlSqlDbSyncTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlsqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlsqldbsynctaskproperties.go new file mode 100644 index 00000000000..810e67733ae --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_connecttotargetsqlsqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +type ConnectToTargetSqlSqlDbSyncTaskProperties struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s *ConnectToTargetSqlSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 
'ConnectToTargetSqlSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_databasebackupinfo.go b/resource-manager/datamigration/2025-06-30/patch/model_databasebackupinfo.go new file mode 100644 index 00000000000..5de0fd030c0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_databasebackupinfo.go @@ -0,0 +1,33 @@ +package patch + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseBackupInfo struct { + BackupFiles *[]string `json:"backupFiles,omitempty"` + BackupFinishDate *string `json:"backupFinishDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FamilyCount *int64 `json:"familyCount,omitempty"` + IsCompressed *bool `json:"isCompressed,omitempty"` + IsDamaged *bool `json:"isDamaged,omitempty"` + Position *int64 `json:"position,omitempty"` +} + +func (o *DatabaseBackupInfo) GetBackupFinishDateAsTime() (*time.Time, error) { + if o.BackupFinishDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseBackupInfo) SetBackupFinishDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishDate = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_databasefileinfo.go b/resource-manager/datamigration/2025-06-30/patch/model_databasefileinfo.go new file mode 100644 index 00000000000..bf0623074d1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_databasefileinfo.go @@ -0,0 +1,14 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseFileInfo struct { + DatabaseName *string `json:"databaseName,omitempty"` + FileType *DatabaseFileType `json:"fileType,omitempty"` + Id *string `json:"id,omitempty"` + LogicalName *string `json:"logicalName,omitempty"` + PhysicalFullName *string `json:"physicalFullName,omitempty"` + RestoreFullName *string `json:"restoreFullName,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_databaseinfo.go b/resource-manager/datamigration/2025-06-30/patch/model_databaseinfo.go new file mode 100644 index 00000000000..b03da5248db --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_databaseinfo.go @@ -0,0 +1,8 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseInfo struct { + SourceDatabaseName string `json:"sourceDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_databasesummaryresult.go b/resource-manager/datamigration/2025-06-30/patch/model_databasesummaryresult.go new file mode 100644 index 00000000000..70a6dd64f17 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_databasesummaryresult.go @@ -0,0 +1,47 @@ +package patch + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_databasetable.go b/resource-manager/datamigration/2025-06-30/patch/model_databasetable.go new file mode 100644 index 00000000000..d754b773cb9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_databasetable.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseTable struct { + HasRows *bool `json:"hasRows,omitempty"` + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_dataintegrityvalidationresult.go b/resource-manager/datamigration/2025-06-30/patch/model_dataintegrityvalidationresult.go new file mode 100644 index 00000000000..cbe32cb2111 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_dataintegrityvalidationresult.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataIntegrityValidationResult struct { + FailedObjects *map[string]string `json:"failedObjects,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_dataitemmigrationsummaryresult.go b/resource-manager/datamigration/2025-06-30/patch/model_dataitemmigrationsummaryresult.go new file mode 100644 index 00000000000..907ca3a11c4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_dataitemmigrationsummaryresult.go @@ -0,0 +1,46 @@ +package patch + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataItemMigrationSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DataItemMigrationSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DataItemMigrationSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_datamigrationservice.go b/resource-manager/datamigration/2025-06-30/patch/model_datamigrationservice.go new file mode 100644 index 00000000000..a03d00b7aec --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_datamigrationservice.go @@ -0,0 +1,21 @@ +package patch + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataMigrationService struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Kind *string `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DataMigrationServiceProperties `json:"properties,omitempty"` + Sku *ServiceSku `json:"sku,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_datamigrationserviceproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_datamigrationserviceproperties.go new file mode 100644 index 00000000000..668a835c5c9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_datamigrationserviceproperties.go @@ -0,0 +1,13 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataMigrationServiceProperties struct { + AutoStopDelay *string `json:"autoStopDelay,omitempty"` + DeleteResourcesOnStop *bool `json:"deleteResourcesOnStop,omitempty"` + ProvisioningState *ServiceProvisioningState `json:"provisioningState,omitempty"` + PublicKey *string `json:"publicKey,omitempty"` + VirtualNicId *string `json:"virtualNicId,omitempty"` + VirtualSubnetId *string `json:"virtualSubnetId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_executionstatistics.go b/resource-manager/datamigration/2025-06-30/patch/model_executionstatistics.go new file mode 100644 index 00000000000..c2d847f8e6e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_executionstatistics.go @@ -0,0 +1,13 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ExecutionStatistics struct { + CpuTimeMs *float64 `json:"cpuTimeMs,omitempty"` + ElapsedTimeMs *float64 `json:"elapsedTimeMs,omitempty"` + ExecutionCount *int64 `json:"executionCount,omitempty"` + HasErrors *bool `json:"hasErrors,omitempty"` + SqlErrors *[]string `json:"sqlErrors,omitempty"` + WaitStats *map[string]WaitStatistics `json:"waitStats,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_fileshare.go b/resource-manager/datamigration/2025-06-30/patch/model_fileshare.go new file mode 100644 index 00000000000..7e9b7c9c627 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_fileshare.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FileShare struct { + Password *string `json:"password,omitempty"` + Path string `json:"path"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_gettdecertificatessqltaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_gettdecertificatessqltaskinput.go new file mode 100644 index 00000000000..175d6996297 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_gettdecertificatessqltaskinput.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetTdeCertificatesSqlTaskInput struct { + BackupFileShare FileShare `json:"backupFileShare"` + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + SelectedCertificates []SelectedCertificateInput `json:"selectedCertificates"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_gettdecertificatessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_gettdecertificatessqltaskoutput.go new file mode 100644 index 00000000000..07ec58d7944 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_gettdecertificatessqltaskoutput.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetTdeCertificatesSqlTaskOutput struct { + Base64EncodedCertificates *map[string][]string `json:"base64EncodedCertificates,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_gettdecertificatessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_gettdecertificatessqltaskproperties.go new file mode 100644 index 00000000000..9e8e769a87f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_gettdecertificatessqltaskproperties.go @@ -0,0 +1,106 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetTdeCertificatesSqlTaskProperties{} + +type GetTdeCertificatesSqlTaskProperties struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetTdeCertificatesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetTdeCertificatesSqlTaskProperties{} + +func (s GetTdeCertificatesSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetTdeCertificatesSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetTDECertificates.Sql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetTdeCertificatesSqlTaskProperties{} + +func (s *GetTdeCertificatesSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetTdeCertificatesSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablesmysqltaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablesmysqltaskinput.go new file mode 100644 index 00000000000..2ef88aa4081 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablesmysqltaskinput.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesMySqlTaskInput struct { + ConnectionInfo MySqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablesmysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablesmysqltaskoutput.go new file mode 100644 index 00000000000..6017e8bd322 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablesmysqltaskoutput.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesMySqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablesmysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablesmysqltaskproperties.go new file mode 100644 index 00000000000..14aa03ced39 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablesmysqltaskproperties.go @@ -0,0 +1,106 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesMySqlTaskProperties{} + +type GetUserTablesMySqlTaskProperties struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesMySqlTaskProperties{} + +func (s GetUserTablesMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesMySqlTaskProperties{} + +func (s *GetUserTablesMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State 
*TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablesoracletaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablesoracletaskinput.go new file mode 100644 index 00000000000..328cbac28b3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablesoracletaskinput.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+
+// GetUserTablesOracleTaskInput is the input for the "GetUserTablesOracle" task:
+// the Oracle source connection plus the schemas whose user tables should be enumerated.
+type GetUserTablesOracleTaskInput struct {
+	ConnectionInfo  OracleConnectionInfo `json:"connectionInfo"`
+	SelectedSchemas []string             `json:"selectedSchemas"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablesoracletaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablesoracletaskoutput.go
new file mode 100644
index 00000000000..92e29140f08
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablesoracletaskoutput.go
@@ -0,0 +1,10 @@
+package patch
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// GetUserTablesOracleTaskOutput is one per-schema result of the "GetUserTablesOracle" task.
+type GetUserTablesOracleTaskOutput struct {
+	SchemaName       *string                `json:"schemaName,omitempty"`
+	Tables           *[]DatabaseTable       `json:"tables,omitempty"`
+	ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablesoracletaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablesoracletaskproperties.go
new file mode 100644
index 00000000000..c157d931688
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablesoracletaskproperties.go
@@ -0,0 +1,106 @@
+package patch
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ ProjectTaskProperties = GetUserTablesOracleTaskProperties{}
+
+// GetUserTablesOracleTaskProperties is the ProjectTaskProperties implementation whose
+// "taskType" discriminator is "GetUserTablesOracle".
+type GetUserTablesOracleTaskProperties struct {
+	Input  *GetUserTablesOracleTaskInput    `json:"input,omitempty"`
+	Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"`
+
+	// Fields inherited from ProjectTaskProperties
+
+	ClientData *map[string]string   `json:"clientData,omitempty"`
+	Commands   *[]CommandProperties `json:"commands,omitempty"`
+	Errors     *[]ODataError        `json:"errors,omitempty"`
+	State      *TaskState           `json:"state,omitempty"`
+	TaskType   TaskType             `json:"taskType"`
+}
+
+// ProjectTaskProperties returns the shared parent-model portion of this type.
+func (s GetUserTablesOracleTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
+	return BaseProjectTaskPropertiesImpl{
+		ClientData: s.ClientData,
+		Commands:   s.Commands,
+		Errors:     s.Errors,
+		State:      s.State,
+		TaskType:   s.TaskType,
+	}
+}
+
+var _ json.Marshaler = GetUserTablesOracleTaskProperties{}
+
+// MarshalJSON serialises the struct and then forces the "taskType" discriminator
+// to its fixed value so the wire form always carries the correct discriminator.
+func (s GetUserTablesOracleTaskProperties) MarshalJSON() ([]byte, error) {
+	type wrapper GetUserTablesOracleTaskProperties
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling GetUserTablesOracleTaskProperties: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties: %+v", err)
+	}
+
+	decoded["taskType"] = "GetUserTablesOracle"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling GetUserTablesOracleTaskProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
+
+var _ json.Unmarshaler = &GetUserTablesOracleTaskProperties{}
+
+// UnmarshalJSON decodes the plain fields directly, then decodes "commands"
+// element-by-element because each entry is itself a discriminated CommandProperties.
+func (s *GetUserTablesOracleTaskProperties) UnmarshalJSON(bytes []byte) error {
+	var decoded struct {
+		Input      *GetUserTablesOracleTaskInput    `json:"input,omitempty"`
+		Output     *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"`
+		ClientData *map[string]string               `json:"clientData,omitempty"`
+		Errors     *[]ODataError                    `json:"errors,omitempty"`
+		State      *TaskState                       `json:"state,omitempty"`
+		TaskType   TaskType                         `json:"taskType"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.Input = decoded.Input
+	s.Output = decoded.Output
+	s.ClientData = decoded.ClientData
+	s.Errors = decoded.Errors
+	s.State = decoded.State
+	s.TaskType = decoded.TaskType
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["commands"]; ok {
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]CommandProperties, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalCommandPropertiesImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesOracleTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Commands = &output
+	}
+
+	return nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablespostgresqltaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablespostgresqltaskinput.go
new file mode 100644
index 00000000000..3a33427e1b1
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablespostgresqltaskinput.go
@@ -0,0 +1,9 @@
+package patch
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// GetUserTablesPostgreSqlTaskInput is the input for the "GetUserTablesPostgreSql" task:
+// the PostgreSQL source connection plus the databases whose user tables should be enumerated.
+type GetUserTablesPostgreSqlTaskInput struct {
+	ConnectionInfo    PostgreSqlConnectionInfo `json:"connectionInfo"`
+	SelectedDatabases []string                 `json:"selectedDatabases"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablespostgresqltaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablespostgresqltaskoutput.go
new file mode 100644
index 00000000000..b6fdcc83e69
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablespostgresqltaskoutput.go
@@ -0,0 +1,10 @@
+package patch
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// GetUserTablesPostgreSqlTaskOutput is one per-database result of the "GetUserTablesPostgreSql" task.
+type GetUserTablesPostgreSqlTaskOutput struct {
+	DatabaseName     *string                `json:"databaseName,omitempty"`
+	Tables           *[]DatabaseTable       `json:"tables,omitempty"`
+	ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablespostgresqltaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablespostgresqltaskproperties.go
new file mode 100644
index 00000000000..ed5eda40f12
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablespostgresqltaskproperties.go
@@ -0,0 +1,106 @@
+package patch
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ ProjectTaskProperties = GetUserTablesPostgreSqlTaskProperties{}
+
+// GetUserTablesPostgreSqlTaskProperties is the ProjectTaskProperties implementation whose
+// "taskType" discriminator is "GetUserTablesPostgreSql".
+type GetUserTablesPostgreSqlTaskProperties struct {
+	Input  *GetUserTablesPostgreSqlTaskInput    `json:"input,omitempty"`
+	Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"`
+
+	// Fields inherited from ProjectTaskProperties
+
+	ClientData *map[string]string   `json:"clientData,omitempty"`
+	Commands   *[]CommandProperties `json:"commands,omitempty"`
+	Errors     *[]ODataError        `json:"errors,omitempty"`
+	State      *TaskState           `json:"state,omitempty"`
+	TaskType   TaskType             `json:"taskType"`
+}
+
+// ProjectTaskProperties returns the shared parent-model portion of this type.
+func (s GetUserTablesPostgreSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
+	return BaseProjectTaskPropertiesImpl{
+		ClientData: s.ClientData,
+		Commands:   s.Commands,
+		Errors:     s.Errors,
+		State:      s.State,
+		TaskType:   s.TaskType,
+	}
+}
+
+var _ json.Marshaler = GetUserTablesPostgreSqlTaskProperties{}
+
+// MarshalJSON serialises the struct and then forces the "taskType" discriminator
+// to its fixed value so the wire form always carries the correct discriminator.
+func (s GetUserTablesPostgreSqlTaskProperties) MarshalJSON() ([]byte, error) {
+	type wrapper GetUserTablesPostgreSqlTaskProperties
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties: %+v", err)
+	}
+
+	decoded["taskType"] = "GetUserTablesPostgreSql"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
+
+var _ json.Unmarshaler = &GetUserTablesPostgreSqlTaskProperties{}
+
+// UnmarshalJSON decodes the plain fields directly, then decodes "commands"
+// element-by-element because each entry is itself a discriminated CommandProperties.
+func (s *GetUserTablesPostgreSqlTaskProperties) UnmarshalJSON(bytes []byte) error {
+	var decoded struct {
+		Input      *GetUserTablesPostgreSqlTaskInput    `json:"input,omitempty"`
+		Output     *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"`
+		ClientData *map[string]string                   `json:"clientData,omitempty"`
+		Errors     *[]ODataError                        `json:"errors,omitempty"`
+		State      *TaskState                           `json:"state,omitempty"`
+		TaskType   TaskType                             `json:"taskType"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.Input = decoded.Input
+	s.Output = decoded.Output
+	s.ClientData = decoded.ClientData
+	s.Errors = decoded.Errors
+	s.State = decoded.State
+	s.TaskType = decoded.TaskType
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["commands"]; ok {
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]CommandProperties, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalCommandPropertiesImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesPostgreSqlTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Commands = &output
+	}
+
+	return nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqlsynctaskinput.go
new file mode 100644
index 00000000000..e34f5b9037f
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqlsynctaskinput.go
@@ -0,0 +1,11 @@
+package patch
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// GetUserTablesSqlSyncTaskInput is the input for the "GetUserTables.AzureSqlDb.Sync" task:
+// source and target SQL connections plus the databases to inspect on each side.
+type GetUserTablesSqlSyncTaskInput struct {
+	SelectedSourceDatabases []string          `json:"selectedSourceDatabases"`
+	SelectedTargetDatabases []string          `json:"selectedTargetDatabases"`
+	SourceConnectionInfo    SqlConnectionInfo `json:"sourceConnectionInfo"`
+	TargetConnectionInfo    SqlConnectionInfo `json:"targetConnectionInfo"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqlsynctaskoutput.go
new file mode 100644
index 00000000000..7ff5a4c44f7
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqlsynctaskoutput.go
@@ -0,0 +1,11 @@
+package patch
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// GetUserTablesSqlSyncTaskOutput is the result of the "GetUserTables.AzureSqlDb.Sync" task,
+// keyed by database name on both the source and target side.
+type GetUserTablesSqlSyncTaskOutput struct {
+	DatabasesToSourceTables *map[string][]DatabaseTable `json:"databasesToSourceTables,omitempty"`
+	DatabasesToTargetTables *map[string][]DatabaseTable `json:"databasesToTargetTables,omitempty"`
+	TableValidationErrors   *map[string][]string        `json:"tableValidationErrors,omitempty"`
+	ValidationErrors        *[]ReportableException      `json:"validationErrors,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqlsynctaskproperties.go
new file mode 100644
index 00000000000..dcc99cb0a9f
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqlsynctaskproperties.go
@@ -0,0 +1,106 @@
+package patch
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ ProjectTaskProperties = GetUserTablesSqlSyncTaskProperties{}
+
+// GetUserTablesSqlSyncTaskProperties is the ProjectTaskProperties implementation whose
+// "taskType" discriminator is "GetUserTables.AzureSqlDb.Sync".
+type GetUserTablesSqlSyncTaskProperties struct {
+	Input  *GetUserTablesSqlSyncTaskInput    `json:"input,omitempty"`
+	Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"`
+
+	// Fields inherited from ProjectTaskProperties
+
+	ClientData *map[string]string   `json:"clientData,omitempty"`
+	Commands   *[]CommandProperties `json:"commands,omitempty"`
+	Errors     *[]ODataError        `json:"errors,omitempty"`
+	State      *TaskState           `json:"state,omitempty"`
+	TaskType   TaskType             `json:"taskType"`
+}
+
+// ProjectTaskProperties returns the shared parent-model portion of this type.
+func (s GetUserTablesSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
+	return BaseProjectTaskPropertiesImpl{
+		ClientData: s.ClientData,
+		Commands:   s.Commands,
+		Errors:     s.Errors,
+		State:      s.State,
+		TaskType:   s.TaskType,
+	}
+}
+
+var _ json.Marshaler = GetUserTablesSqlSyncTaskProperties{}
+
+// MarshalJSON serialises the struct and then forces the "taskType" discriminator
+// to its fixed value so the wire form always carries the correct discriminator.
+func (s GetUserTablesSqlSyncTaskProperties) MarshalJSON() ([]byte, error) {
+	type wrapper GetUserTablesSqlSyncTaskProperties
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling GetUserTablesSqlSyncTaskProperties: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties: %+v", err)
+	}
+
+	decoded["taskType"] = "GetUserTables.AzureSqlDb.Sync"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling GetUserTablesSqlSyncTaskProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
+
+var _ json.Unmarshaler = &GetUserTablesSqlSyncTaskProperties{}
+
+// UnmarshalJSON decodes the plain fields directly, then decodes "commands"
+// element-by-element because each entry is itself a discriminated CommandProperties.
+func (s *GetUserTablesSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
+	var decoded struct {
+		Input      *GetUserTablesSqlSyncTaskInput    `json:"input,omitempty"`
+		Output     *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"`
+		ClientData *map[string]string                `json:"clientData,omitempty"`
+		Errors     *[]ODataError                     `json:"errors,omitempty"`
+		State      *TaskState                        `json:"state,omitempty"`
+		TaskType   TaskType                          `json:"taskType"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.Input = decoded.Input
+	s.Output = decoded.Output
+	s.ClientData = decoded.ClientData
+	s.Errors = decoded.Errors
+	s.State = decoded.State
+	s.TaskType = decoded.TaskType
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["commands"]; ok {
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]CommandProperties, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalCommandPropertiesImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlSyncTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Commands = &output
+	}
+
+	return nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqltaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqltaskinput.go
new file mode 100644
index 00000000000..9de131b5272
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqltaskinput.go
@@ -0,0 +1,10 @@
+package patch
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// GetUserTablesSqlTaskInput is the input for the "GetUserTables.Sql" task: the SQL
+// connection, the databases to inspect, and an optional key for secure-field encryption.
+type GetUserTablesSqlTaskInput struct {
+	ConnectionInfo              SqlConnectionInfo `json:"connectionInfo"`
+	EncryptedKeyForSecureFields *string           `json:"encryptedKeyForSecureFields,omitempty"`
+	SelectedDatabases           []string          `json:"selectedDatabases"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqltaskoutput.go
new file mode 100644
index 00000000000..1442e0c82d1
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqltaskoutput.go
@@ -0,0 +1,10 @@
+package patch
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// GetUserTablesSqlTaskOutput is the result of the "GetUserTables.Sql" task,
+// mapping each database to its user tables.
+type GetUserTablesSqlTaskOutput struct {
+	DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"`
+	Id                *string                     `json:"id,omitempty"`
+	ValidationErrors  *[]ReportableException      `json:"validationErrors,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqltaskproperties.go
new file mode 100644
index 00000000000..e2bf0fe5fc5
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_getusertablessqltaskproperties.go
@@ -0,0 +1,109 @@
+package patch
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ ProjectTaskProperties = GetUserTablesSqlTaskProperties{}
+
+// GetUserTablesSqlTaskProperties is the ProjectTaskProperties implementation whose
+// "taskType" discriminator is "GetUserTables.Sql". Unlike its siblings it also
+// carries a TaskId.
+type GetUserTablesSqlTaskProperties struct {
+	Input  *GetUserTablesSqlTaskInput    `json:"input,omitempty"`
+	Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"`
+	TaskId *string                       `json:"taskId,omitempty"`
+
+	// Fields inherited from ProjectTaskProperties
+
+	ClientData *map[string]string   `json:"clientData,omitempty"`
+	Commands   *[]CommandProperties `json:"commands,omitempty"`
+	Errors     *[]ODataError        `json:"errors,omitempty"`
+	State      *TaskState           `json:"state,omitempty"`
+	TaskType   TaskType             `json:"taskType"`
+}
+
+// ProjectTaskProperties returns the shared parent-model portion of this type.
+func (s GetUserTablesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
+	return BaseProjectTaskPropertiesImpl{
+		ClientData: s.ClientData,
+		Commands:   s.Commands,
+		Errors:     s.Errors,
+		State:      s.State,
+		TaskType:   s.TaskType,
+	}
+}
+
+var _ json.Marshaler = GetUserTablesSqlTaskProperties{}
+
+// MarshalJSON serialises the struct and then forces the "taskType" discriminator
+// to its fixed value so the wire form always carries the correct discriminator.
+func (s GetUserTablesSqlTaskProperties) MarshalJSON() ([]byte, error) {
+	type wrapper GetUserTablesSqlTaskProperties
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling GetUserTablesSqlTaskProperties: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties: %+v", err)
+	}
+
+	decoded["taskType"] = "GetUserTables.Sql"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling GetUserTablesSqlTaskProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
+
+var _ json.Unmarshaler = &GetUserTablesSqlTaskProperties{}
+
+// UnmarshalJSON decodes the plain fields directly, then decodes "commands"
+// element-by-element because each entry is itself a discriminated CommandProperties.
+func (s *GetUserTablesSqlTaskProperties) UnmarshalJSON(bytes []byte) error {
+	var decoded struct {
+		Input      *GetUserTablesSqlTaskInput    `json:"input,omitempty"`
+		Output     *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"`
+		TaskId     *string                       `json:"taskId,omitempty"`
+		ClientData *map[string]string            `json:"clientData,omitempty"`
+		Errors     *[]ODataError                 `json:"errors,omitempty"`
+		State      *TaskState                    `json:"state,omitempty"`
+		TaskType   TaskType                      `json:"taskType"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.Input = decoded.Input
+	s.Output = decoded.Output
+	s.TaskId = decoded.TaskId
+	s.ClientData = decoded.ClientData
+	s.Errors = decoded.Errors
+	s.State = decoded.State
+	s.TaskType = decoded.TaskType
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["commands"]; ok {
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]CommandProperties, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalCommandPropertiesImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Commands = &output
+	}
+
+	return nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemisynccompletecommandinput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemisynccompletecommandinput.go
new file mode 100644
index 00000000000..2f5c14fb0eb
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemisynccompletecommandinput.go
@@ -0,0 +1,8 @@
+package patch
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrateMISyncCompleteCommandInput names the source database whose SQL MI sync
+// migration should be completed (cut over).
+type MigrateMISyncCompleteCommandInput struct {
+	SourceDatabaseName string `json:"sourceDatabaseName"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemisynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemisynccompletecommandoutput.go
new file mode 100644
index 00000000000..b22ecf4c445
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemisynccompletecommandoutput.go
@@ -0,0 +1,8 @@
+package patch
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrateMISyncCompleteCommandOutput carries any errors raised while completing the migration.
+type MigrateMISyncCompleteCommandOutput struct {
+	Errors *[]ReportableException `json:"errors,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemisynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemisynccompletecommandproperties.go
new file mode 100644
index 00000000000..a6fd0bcfe1b
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemisynccompletecommandproperties.go
@@ -0,0 +1,55 @@
+package patch
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ CommandProperties = MigrateMISyncCompleteCommandProperties{}
+
+// MigrateMISyncCompleteCommandProperties is the CommandProperties implementation whose
+// "commandType" discriminator is "Migrate.SqlServer.AzureDbSqlMi.Complete".
+type MigrateMISyncCompleteCommandProperties struct {
+	Input  *MigrateMISyncCompleteCommandInput  `json:"input,omitempty"`
+	Output *MigrateMISyncCompleteCommandOutput `json:"output,omitempty"`
+
+	// Fields inherited from CommandProperties
+
+	CommandType CommandType    `json:"commandType"`
+	Errors      *[]ODataError  `json:"errors,omitempty"`
+	State       *CommandState  `json:"state,omitempty"`
+}
+
+// CommandProperties returns the shared parent-model portion of this type.
+func (s MigrateMISyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl {
+	return BaseCommandPropertiesImpl{
+		CommandType: s.CommandType,
+		Errors:      s.Errors,
+		State:       s.State,
+	}
+}
+
+var _ json.Marshaler = MigrateMISyncCompleteCommandProperties{}
+
+// MarshalJSON serialises the struct and then forces the "commandType" discriminator
+// to its fixed value so the wire form always carries the correct discriminator.
+func (s MigrateMISyncCompleteCommandProperties) MarshalJSON() ([]byte, error) {
+	type wrapper MigrateMISyncCompleteCommandProperties
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMISyncCompleteCommandProperties: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMISyncCompleteCommandProperties: %+v", err)
+	}
+
+	decoded["commandType"] = "Migrate.SqlServer.AzureDbSqlMi.Complete"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMISyncCompleteCommandProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemongodbtaskproperties.go
new file mode 100644
index 00000000000..cd368acf741
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemongodbtaskproperties.go
@@ -0,0 +1,121 @@
+package patch
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ ProjectTaskProperties = MigrateMongoDbTaskProperties{}
+
+// MigrateMongoDbTaskProperties is the ProjectTaskProperties implementation whose
+// "taskType" discriminator is "Migrate.MongoDb". Its Output elements are themselves
+// discriminated (MongoDbProgress), so they need custom decoding below.
+type MigrateMongoDbTaskProperties struct {
+	Input  *MongoDbMigrationSettings `json:"input,omitempty"`
+	Output *[]MongoDbProgress        `json:"output,omitempty"`
+
+	// Fields inherited from ProjectTaskProperties
+
+	ClientData *map[string]string   `json:"clientData,omitempty"`
+	Commands   *[]CommandProperties `json:"commands,omitempty"`
+	Errors     *[]ODataError        `json:"errors,omitempty"`
+	State      *TaskState           `json:"state,omitempty"`
+	TaskType   TaskType             `json:"taskType"`
+}
+
+// ProjectTaskProperties returns the shared parent-model portion of this type.
+func (s MigrateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
+	return BaseProjectTaskPropertiesImpl{
+		ClientData: s.ClientData,
+		Commands:   s.Commands,
+		Errors:     s.Errors,
+		State:      s.State,
+		TaskType:   s.TaskType,
+	}
+}
+
+var _ json.Marshaler = MigrateMongoDbTaskProperties{}
+
+// MarshalJSON serialises the struct and then forces the "taskType" discriminator
+// to its fixed value so the wire form always carries the correct discriminator.
+func (s MigrateMongoDbTaskProperties) MarshalJSON() ([]byte, error) {
+	type wrapper MigrateMongoDbTaskProperties
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMongoDbTaskProperties: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties: %+v", err)
+	}
+
+	decoded["taskType"] = "Migrate.MongoDb"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMongoDbTaskProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
+
+var _ json.Unmarshaler = &MigrateMongoDbTaskProperties{}
+
+// UnmarshalJSON decodes the plain fields directly; "commands" and "output" are
+// decoded element-by-element because both hold discriminated union types.
+func (s *MigrateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error {
+	var decoded struct {
+		Input      *MongoDbMigrationSettings `json:"input,omitempty"`
+		ClientData *map[string]string        `json:"clientData,omitempty"`
+		Errors     *[]ODataError             `json:"errors,omitempty"`
+		State      *TaskState                `json:"state,omitempty"`
+		TaskType   TaskType                  `json:"taskType"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.Input = decoded.Input
+	s.ClientData = decoded.ClientData
+	s.Errors = decoded.Errors
+	s.State = decoded.State
+	s.TaskType = decoded.TaskType
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["commands"]; ok {
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]CommandProperties, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalCommandPropertiesImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMongoDbTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Commands = &output
+	}
+
+	if v, ok := temp["output"]; ok {
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]MongoDbProgress, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalMongoDbProgressImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMongoDbTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Output = &output
+	}
+
+	return nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinedatabaseinput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinedatabaseinput.go
new file mode 100644
index 00000000000..00b540a3a34
--- /dev/null
+++
b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinedatabaseinput.go
@@ -0,0 +1,10 @@
+package patch
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrateMySqlAzureDbForMySqlOfflineDatabaseInput selects one source database for
+// migration and optionally renames it / maps its tables on the target.
+type MigrateMySqlAzureDbForMySqlOfflineDatabaseInput struct {
+	Name               *string            `json:"name,omitempty"`
+	TableMap           *map[string]string `json:"tableMap,omitempty"`
+	TargetDatabaseName *string            `json:"targetDatabaseName,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskinput.go
new file mode 100644
index 00000000000..36c24a6f275
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskinput.go
@@ -0,0 +1,32 @@
+package patch
+
+import (
+	"time"
+
+	"github.com/hashicorp/go-azure-helpers/lang/dates"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrateMySqlAzureDbForMySqlOfflineTaskInput is the input for the offline
+// MySQL -> Azure DB for MySQL migration task. StartedOn is an RFC3339 timestamp
+// kept as a string on the wire; use the helpers below to convert.
+type MigrateMySqlAzureDbForMySqlOfflineTaskInput struct {
+	EncryptedKeyForSecureFields *string                                           `json:"encryptedKeyForSecureFields,omitempty"`
+	MakeSourceServerReadOnly    *bool                                             `json:"makeSourceServerReadOnly,omitempty"`
+	OptionalAgentSettings       *map[string]string                                `json:"optionalAgentSettings,omitempty"`
+	SelectedDatabases           []MigrateMySqlAzureDbForMySqlOfflineDatabaseInput `json:"selectedDatabases"`
+	SourceConnectionInfo        MySqlConnectionInfo                               `json:"sourceConnectionInfo"`
+	StartedOn                   *string                                           `json:"startedOn,omitempty"`
+	TargetConnectionInfo        MySqlConnectionInfo                               `json:"targetConnectionInfo"`
+}
+
+// GetStartedOnAsTime parses StartedOn as RFC3339; returns (nil, nil) when unset.
+func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) GetStartedOnAsTime() (*time.Time, error) {
+	if o.StartedOn == nil {
+		return nil, nil
+	}
+	return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00")
+}
+
+// SetStartedOnAsTime stores input as an RFC3339-formatted string in StartedOn.
+func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) SetStartedOnAsTime(input time.Time) {
+	formatted := input.Format("2006-01-02T15:04:05Z07:00")
+	o.StartedOn = &formatted
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutput.go
new file mode 100644
index 00000000000..6faa5a82a54
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutput.go
@@ -0,0 +1,100 @@
+package patch
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrateMySqlAzureDbForMySqlOfflineTaskOutput is the discriminated-union parent
+// for the offline MySQL migration task's per-level outputs ("resultType" discriminator).
+type MigrateMySqlAzureDbForMySqlOfflineTaskOutput interface {
+	MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl
+}
+
+var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{}
+
+// BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl holds the fields common to
+// every concrete output variant.
+type BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct {
+	Id         *string `json:"id,omitempty"`
+	ResultType string  `json:"resultType"`
+}
+
+func (s BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl {
+	return s
+}
+
+var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{}
+
+// RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types
+// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
+// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
+type RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct {
+	migrateMySqlAzureDbForMySqlOfflineTaskOutput BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl
+	Type                                         string
+	Values                                       map[string]interface{}
+}
+
+func (s RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl {
+	return s.migrateMySqlAzureDbForMySqlOfflineTaskOutput
+}
+
+// UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation inspects the
+// "resultType" discriminator (case-insensitively) and decodes into the matching
+// concrete type, falling back to the Raw wrapper for unknown discriminator values.
+func UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlOfflineTaskOutput, error) {
+	if input == nil {
+		return nil, nil
+	}
+
+	var temp map[string]interface{}
+	if err := json.Unmarshal(input, &temp); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutput into map[string]interface: %+v", err)
+	}
+
+	var value string
+	if v, ok := temp["resultType"]; ok {
+		value = fmt.Sprintf("%v", v)
+	}
+
+	if strings.EqualFold(value, "DatabaseLevelOutput") {
+		var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err)
+		}
+		return out, nil
+	}
+
+	if strings.EqualFold(value, "ErrorOutput") {
+		var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputError
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err)
+		}
+		return out, nil
+	}
+
+	if strings.EqualFold(value, "MigrationLevelOutput") {
+		var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err)
+		}
+		return out, nil
+	}
+
+	if strings.EqualFold(value, "TableLevelOutput") {
+		var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err)
+		}
+		return out, nil
+	}
+
+	var parent BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl
+	if err := json.Unmarshal(input, &parent); err != nil {
+		return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl: %+v", err)
+	}
+
+	return RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{
+		migrateMySqlAzureDbForMySqlOfflineTaskOutput: parent,
+		Type:   value,
+		Values: temp,
+	}, nil
+
+}
diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go
new file mode 100644
index 00000000000..95310e64583
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go
@@ -0,0 +1,66 @@
+package patch
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + var decoded 
map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go new file mode 100644 index 00000000000..cc4c1ad91dd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go @@ -0,0 +1,52 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go new file mode 100644 index 00000000000..599567bcec0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package patch + +import ( + 
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MarshalJSON() ([]byte, 
error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go new file mode 100644 index 00000000000..6c5b2780c66 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go @@ -0,0 +1,61 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling 
MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskproperties.go new file mode 100644 index 00000000000..6ac616ba2d6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlofflinetaskproperties.go @@ -0,0 +1,127 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlOfflineTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskProperties + wrapped := wrapper(s) + encoded, 
err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlOfflineTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range 
listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlOfflineTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsyncdatabaseinput.go new file mode 100644 index 00000000000..d733db80f3a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsyncdatabaseinput.go @@ -0,0 +1,13 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlSyncDatabaseInput struct { + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskinput.go new file mode 100644 index 00000000000..bff82ecbbfc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskinput.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlSyncTaskInput struct { + SelectedDatabases []MigrateMySqlAzureDbForMySqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutput.go new file mode 100644 index 00000000000..df1a83912d3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package patch + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlSyncTaskOutput interface { + MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlSyncTaskOutput BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlSyncTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err 
!= nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + migrateMySqlAzureDbForMySqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..e107673c1fb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..a18a365d861 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + +func (s 
MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputerror.go new file mode 100644 index 00000000000..976febaebde --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..f6886c779a3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff 
--git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..d8a27930675 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *string `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *string `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *string `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + 
Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskproperties.go new file mode 100644 index 00000000000..e0f55ed7d51 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratemysqlazuredbformysqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +type MigrateMySqlAzureDbForMySqlSyncTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput 
`json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..46d2bcb847c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type MigrateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded 
map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, 
ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateOracleAzureDbPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..475853cddaa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateOracleAzureDbPostgreSqlSyncDatabaseInput struct { + CaseManipulation *string `json:"caseManipulation,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..fe4642e5b03 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskinput.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateOracleAzureDbPostgreSqlSyncTaskInput struct { + SelectedDatabases []MigrateOracleAzureDbPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..ae6af5e77c9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package patch + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutput interface { + MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +type BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +// RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + migrateOracleAzureDbPostgreSqlSyncTaskOutput BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s.migrateOracleAzureDbPostgreSqlSyncTaskOutput +} + +func UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(input []byte) (MigrateOracleAzureDbPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + migrateOracleAzureDbPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..17ba866fe5e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..ca8cb355bee --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..fe7649b678b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..aa89f2df2ee --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ +package patch + +import ( + 
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..b9d0eb15d6a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) 
MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..f5584cec0d9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go @@ -0,0 +1,14 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput describes one database selected for a
// PostgreSQL -> Azure DB for PostgreSQL sync migration, with optional per-database overrides.
// NOTE(review): pointer-to-map fields (e.g. *map[string]string) are a generator convention so
// that "absent" and "empty" can be distinguished when round-tripping JSON.
type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput struct {
	Id                 *string                                                        `json:"id,omitempty"`
	MigrationSetting   *map[string]interface{}                                        `json:"migrationSetting,omitempty"`
	Name               *string                                                        `json:"name,omitempty"`
	SelectedTables     *[]MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput `json:"selectedTables,omitempty"`
	SourceSetting      *map[string]string                                             `json:"sourceSetting,omitempty"`
	TargetDatabaseName *string                                                        `json:"targetDatabaseName,omitempty"`
	TargetSetting      *map[string]string                                             `json:"targetSetting,omitempty"`
}

// MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput identifies a single table by name
// within a selected database.
type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput struct {
	Name *string `json:"name,omitempty"`
}
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..34ea9da5701 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package patch + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput is the discriminated-union interface for
// the task's output items; concrete variants are selected by the JSON "resultType" field.
type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput interface {
	MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl
}

var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{}

// BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl holds the fields shared by every
// variant of the union (the parent-model fields).
type BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct {
	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput returns the base impl itself, so the base
// type trivially satisfies the union interface.
func (s BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl {
	return s
}

var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{}

// RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
+type RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s.migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput +} + +func UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(input []byte) (MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if 
strings.EqualFold(value, "MigrationLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..2f241b3a09e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go 
var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{}

// MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel carries per-database progress
// counters for the sync migration; its JSON discriminator is "DatabaseLevelOutput".
type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel struct {
	AppliedChanges          *int64                              `json:"appliedChanges,omitempty"`
	CdcDeleteCounter        *int64                              `json:"cdcDeleteCounter,omitempty"`
	CdcInsertCounter        *int64                              `json:"cdcInsertCounter,omitempty"`
	CdcUpdateCounter        *int64                              `json:"cdcUpdateCounter,omitempty"`
	DatabaseName            *string                             `json:"databaseName,omitempty"`
	EndedOn                 *string                             `json:"endedOn,omitempty"`
	FullLoadCompletedTables *int64                              `json:"fullLoadCompletedTables,omitempty"`
	FullLoadErroredTables   *int64                              `json:"fullLoadErroredTables,omitempty"`
	FullLoadLoadingTables   *int64                              `json:"fullLoadLoadingTables,omitempty"`
	FullLoadQueuedTables    *int64                              `json:"fullLoadQueuedTables,omitempty"`
	IncomingChanges         *int64                              `json:"incomingChanges,omitempty"`
	InitializationCompleted *bool                               `json:"initializationCompleted,omitempty"`
	Latency                 *int64                              `json:"latency,omitempty"`
	MigrationState          *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"`
	StartedOn               *string                             `json:"startedOn,omitempty"`

	// Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput projects this variant onto the shared base
// implementation, satisfying the discriminated-union interface.
func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl {
	return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{}

// MarshalJSON serialises the struct via a type alias (so the default marshaller is used),
// then re-writes the "resultType" discriminator to the fixed value for this variant.
func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err)
	}

	// Pin the discriminator regardless of the struct's ResultType field value.
	decoded["resultType"] = "DatabaseLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err)
	}

	return encoded, nil
}
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..f655cf9027c 
var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{}

// MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel summarises the migration as a
// whole (servers, state, timings); its JSON discriminator is "MigrationLevelOutput".
type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel struct {
	DatabaseCount       *float64                `json:"databaseCount,omitempty"`
	EndedOn             *string                 `json:"endedOn,omitempty"`
	SourceServer        *string                 `json:"sourceServer,omitempty"`
	SourceServerType    *ScenarioSource         `json:"sourceServerType,omitempty"`
	SourceServerVersion *string                 `json:"sourceServerVersion,omitempty"`
	StartedOn           *string                 `json:"startedOn,omitempty"`
	State               *ReplicateMigrationState `json:"state,omitempty"`
	TargetServer        *string                 `json:"targetServer,omitempty"`
	TargetServerType    *ScenarioTarget         `json:"targetServerType,omitempty"`
	TargetServerVersion *string                 `json:"targetServerVersion,omitempty"`

	// Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput projects this variant onto the shared base
// implementation, satisfying the discriminated-union interface.
func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl {
	return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{}

// MarshalJSON serialises the struct via a type alias (so the default marshaller is used),
// then re-writes the "resultType" discriminator to the fixed value for this variant.
func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err)
	}

	// Pin the discriminator regardless of the struct's ResultType field value.
	decoded["resultType"] = "MigrationLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err)
	}

	return encoded, nil
}
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..831a9f8c4ad --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,130 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
var _ ProjectTaskProperties = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{}

// MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties is the ProjectTaskProperties variant
// for the "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" task type.
type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties struct {
	CreatedOn   *string                                                `json:"createdOn,omitempty"`
	Input       *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput    `json:"input,omitempty"`
	IsCloneable *bool                                                  `json:"isCloneable,omitempty"`
	Output      *[]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"`
	TaskId      *string                                                `json:"taskId,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties projects this variant onto the shared base implementation, satisfying
// the discriminated-union interface.
func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{}

// MarshalJSON serialises the struct via a type alias (so the default marshaller is used), then
// re-writes the "taskType" discriminator to the fixed value for this variant.
func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) {
	type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	// Pin the discriminator regardless of the struct's TaskType field value.
	decoded["taskType"] = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then decodes the two polymorphic list fields
// ("commands" and "output") element-by-element through their union unmarshal helpers, since the
// default decoder cannot instantiate interface-typed elements.
func (s *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	// Plain (non-polymorphic) fields; Commands/Output are deliberately omitted here.
	var decoded struct {
		CreatedOn   *string                                             `json:"createdOn,omitempty"`
		Input       *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"`
		IsCloneable *bool                                               `json:"isCloneable,omitempty"`
		TaskId      *string                                             `json:"taskId,omitempty"`
		ClientData  *map[string]string                                  `json:"clientData,omitempty"`
		Errors      *[]ODataError                                       `json:"errors,omitempty"`
		State       *TaskState                                          `json:"state,omitempty"`
		TaskType    TaskType                                            `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.CreatedOn = decoded.CreatedOn
	s.Input = decoded.Input
	s.IsCloneable = decoded.IsCloneable
	s.TaskId = decoded.TaskId
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: keep the raw bytes of each polymorphic field for per-element dispatch.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}

// MigrateSqlServerSqlDbDatabaseInput describes one database selected for a SQL Server -> Azure
// SQL DB migration, including the table map and target name overrides.
type MigrateSqlServerSqlDbDatabaseInput struct {
	Id                   *string            `json:"id,omitempty"`
	MakeSourceDbReadOnly *bool              `json:"makeSourceDbReadOnly,omitempty"`
	Name                 *string            `json:"name,omitempty"`
	SchemaSetting        *interface{}       `json:"schemaSetting,omitempty"`
	TableMap             *map[string]string `json:"tableMap,omitempty"`
	TargetDatabaseName   *string            `json:"targetDatabaseName,omitempty"`
}
// MigrateSqlServerSqlDbSyncDatabaseInput describes one database selected for a SQL Server ->
// Azure SQL DB sync migration, with optional per-database setting overrides.
type MigrateSqlServerSqlDbSyncDatabaseInput struct {
	Id                 *string            `json:"id,omitempty"`
	MigrationSetting   *map[string]string `json:"migrationSetting,omitempty"`
	Name               *string            `json:"name,omitempty"`
	SchemaName         *string            `json:"schemaName,omitempty"`
	SourceSetting      *map[string]string `json:"sourceSetting,omitempty"`
	TableMap           *map[string]string `json:"tableMap,omitempty"`
	TargetDatabaseName *string            `json:"targetDatabaseName,omitempty"`
	TargetSetting      *map[string]string `json:"targetSetting,omitempty"`
}

// MigrateSqlServerSqlDbSyncTaskInput is the input payload for the SQL Server -> Azure SQL DB
// sync migration task: connection details for both ends plus the selected databases.
type MigrateSqlServerSqlDbSyncTaskInput struct {
	SelectedDatabases    []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"`
	SourceConnectionInfo SqlConnectionInfo                        `json:"sourceConnectionInfo"`
	TargetConnectionInfo SqlConnectionInfo                        `json:"targetConnectionInfo"`
	ValidationOptions    *MigrationValidationOptions              `json:"validationOptions,omitempty"`
}
// MigrateSqlServerSqlDbSyncTaskOutput is the discriminated-union interface for the task's output
// items; concrete variants are selected by the JSON "resultType" field.
type MigrateSqlServerSqlDbSyncTaskOutput interface {
	MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl
}

var _ MigrateSqlServerSqlDbSyncTaskOutput = BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{}

// BaseMigrateSqlServerSqlDbSyncTaskOutputImpl holds the fields shared by every variant of the
// union (the parent-model fields).
type BaseMigrateSqlServerSqlDbSyncTaskOutputImpl struct {
	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbSyncTaskOutput returns the base impl itself, so the base type trivially
// satisfies the union interface.
func (s BaseMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl {
	return s
}

var _ MigrateSqlServerSqlDbSyncTaskOutput = RawMigrateSqlServerSqlDbSyncTaskOutputImpl{}

// RawMigrateSqlServerSqlDbSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawMigrateSqlServerSqlDbSyncTaskOutputImpl struct {
	migrateSqlServerSqlDbSyncTaskOutput BaseMigrateSqlServerSqlDbSyncTaskOutputImpl
	Type                                string
	Values                              map[string]interface{}
}

// MigrateSqlServerSqlDbSyncTaskOutput exposes the parsed base fields captured alongside the raw
// payload.
func (s RawMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl {
	return s.migrateSqlServerSqlDbSyncTaskOutput
}

// UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation decodes input into the concrete
// variant selected by the "resultType" discriminator, falling back to the Raw wrapper when the
// discriminator is unknown. A nil input yields (nil, nil).
func UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbSyncTaskOutput, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutput into map[string]interface: %+v", err)
	}

	// The discriminator may be absent; an empty string then falls through to the Raw wrapper.
	var value string
	if v, ok := temp["resultType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "DatabaseLevelErrorOutput") {
		var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseError
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "DatabaseLevelOutput") {
		var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ErrorOutput") {
		var out MigrateSqlServerSqlDbSyncTaskOutputError
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "MigrationLevelOutput") {
		var out MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "TableLevelOutput") {
		var out MigrateSqlServerSqlDbSyncTaskOutputTableLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err)
		}
		return out, nil
	}

	// Unknown discriminator: keep the parsed base fields plus the raw payload.
	var parent BaseMigrateSqlServerSqlDbSyncTaskOutputImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbSyncTaskOutputImpl: %+v", err)
	}

	return RawMigrateSqlServerSqlDbSyncTaskOutputImpl{
		migrateSqlServerSqlDbSyncTaskOutput: parent,
		Type:                                value,
		Values:                              temp,
	}, nil

}
var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{}

// MigrateSqlServerSqlDbSyncTaskOutputDatabaseError is the "DatabaseLevelErrorOutput"
// variant of MigrateSqlServerSqlDbSyncTaskOutput.
type MigrateSqlServerSqlDbSyncTaskOutputDatabaseError struct {
	ErrorMessage *string                            `json:"errorMessage,omitempty"`
	Events       *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"`

	// Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbSyncTaskOutput returns the shared base fields of this variant.
func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl {
	return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{}

// MarshalJSON implements json.Marshaler, forcing the "resultType" discriminator to
// "DatabaseLevelErrorOutput" regardless of the struct's field value. The wrapper type
// avoids infinite recursion into this MarshalJSON.
func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseError
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err)
	}

	decoded["resultType"] = "DatabaseLevelErrorOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqldbsynctaskoutputdatabaselevel.go (new file) ----

package patch

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{}

// MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel is the "DatabaseLevelOutput"
// variant, reporting per-database sync-migration progress (full-load table counts
// plus CDC change counters).
type MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel struct {
	AppliedChanges          *int64                              `json:"appliedChanges,omitempty"`
	CdcDeleteCounter        *int64                              `json:"cdcDeleteCounter,omitempty"`
	CdcInsertCounter        *int64                              `json:"cdcInsertCounter,omitempty"`
	CdcUpdateCounter        *int64                              `json:"cdcUpdateCounter,omitempty"`
	DatabaseName            *string                             `json:"databaseName,omitempty"`
	EndedOn                 *string                             `json:"endedOn,omitempty"`
	FullLoadCompletedTables *int64                              `json:"fullLoadCompletedTables,omitempty"`
	FullLoadErroredTables   *int64                              `json:"fullLoadErroredTables,omitempty"`
	FullLoadLoadingTables   *int64                              `json:"fullLoadLoadingTables,omitempty"`
	FullLoadQueuedTables    *int64                              `json:"fullLoadQueuedTables,omitempty"`
	IncomingChanges         *int64                              `json:"incomingChanges,omitempty"`
	InitializationCompleted *bool                               `json:"initializationCompleted,omitempty"`
	Latency                 *int64                              `json:"latency,omitempty"`
	MigrationState          *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"`
	StartedOn               *string                             `json:"startedOn,omitempty"`

	// Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbSyncTaskOutput returns the shared base fields of this variant.
func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl {
	return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{}

// MarshalJSON implements json.Marshaler, forcing the "resultType" discriminator
// to "DatabaseLevelOutput".
func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err)
	}

	decoded["resultType"] = "DatabaseLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqldbsynctaskoutputerror.go (new file) ----

package patch

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputError{}

// MigrateSqlServerSqlDbSyncTaskOutputError is the "ErrorOutput" variant of
// MigrateSqlServerSqlDbSyncTaskOutput, carrying a single reportable exception.
type MigrateSqlServerSqlDbSyncTaskOutputError struct {
	Error *ReportableException `json:"error,omitempty"`

	// Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbSyncTaskOutput returns the shared base fields of this variant.
func (s MigrateSqlServerSqlDbSyncTaskOutputError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl {
	return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputError{}

// MarshalJSON implements json.Marshaler, forcing the "resultType" discriminator
// to "ErrorOutput".
func (s MigrateSqlServerSqlDbSyncTaskOutputError) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlDbSyncTaskOutputError
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err)
	}

	decoded["resultType"] = "ErrorOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqldbsynctaskoutputmigrationlevel.go (new file) ----

package patch

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{}

// MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel is the "MigrationLevelOutput"
// variant, summarizing the overall sync migration (servers, versions, timings).
type MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel struct {
	DatabaseCount       *int64  `json:"databaseCount,omitempty"`
	EndedOn             *string `json:"endedOn,omitempty"`
	SourceServer        *string `json:"sourceServer,omitempty"`
	SourceServerVersion *string `json:"sourceServerVersion,omitempty"`
	StartedOn           *string `json:"startedOn,omitempty"`
	TargetServer        *string `json:"targetServer,omitempty"`
	TargetServerVersion *string `json:"targetServerVersion,omitempty"`

	// Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbSyncTaskOutput returns the shared base fields of this variant.
func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl {
	return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{}

// MarshalJSON implements json.Marshaler, forcing the "resultType" discriminator
// to "MigrationLevelOutput".
func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err)
	}

	decoded["resultType"] = "MigrationLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqldbsynctaskoutputtablelevel.go (new file) ----

package patch

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{}

// MigrateSqlServerSqlDbSyncTaskOutputTableLevel is the "TableLevelOutput" variant,
// reporting per-table full-load progress and CDC change counters.
type MigrateSqlServerSqlDbSyncTaskOutputTableLevel struct {
	CdcDeleteCounter      *int64                   `json:"cdcDeleteCounter,omitempty"`
	CdcInsertCounter      *int64                   `json:"cdcInsertCounter,omitempty"`
	CdcUpdateCounter      *int64                   `json:"cdcUpdateCounter,omitempty"`
	DataErrorsCounter     *int64                   `json:"dataErrorsCounter,omitempty"`
	DatabaseName          *string                  `json:"databaseName,omitempty"`
	FullLoadEndedOn       *string                  `json:"fullLoadEndedOn,omitempty"`
	FullLoadEstFinishTime *string                  `json:"fullLoadEstFinishTime,omitempty"`
	FullLoadStartedOn     *string                  `json:"fullLoadStartedOn,omitempty"`
	FullLoadTotalRows     *int64                   `json:"fullLoadTotalRows,omitempty"`
	LastModifiedTime      *string                  `json:"lastModifiedTime,omitempty"`
	State                 *SyncTableMigrationState `json:"state,omitempty"`
	TableName             *string                  `json:"tableName,omitempty"`
	TotalChangesApplied   *int64                   `json:"totalChangesApplied,omitempty"`

	// Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbSyncTaskOutput returns the shared base fields of this variant.
func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl {
	return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{}

// MarshalJSON implements json.Marshaler, forcing the "resultType" discriminator
// to "TableLevelOutput".
func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlDbSyncTaskOutputTableLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err)
	}

	decoded["resultType"] = "TableLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqldbsynctaskproperties.go (new file) ----

package patch

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
var _ ProjectTaskProperties = MigrateSqlServerSqlDbSyncTaskProperties{}

// MigrateSqlServerSqlDbSyncTaskProperties is the ProjectTaskProperties variant with
// taskType "Migrate.SqlServer.AzureSqlDb.Sync". Output holds a polymorphic list,
// which is why this type needs a custom UnmarshalJSON.
type MigrateSqlServerSqlDbSyncTaskProperties struct {
	Input  *MigrateSqlServerSqlDbSyncTaskInput    `json:"input,omitempty"`
	Output *[]MigrateSqlServerSqlDbSyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string    `json:"clientData,omitempty"`
	Commands   *[]CommandProperties  `json:"commands,omitempty"`
	Errors     *[]ODataError         `json:"errors,omitempty"`
	State      *TaskState            `json:"state,omitempty"`
	TaskType   TaskType              `json:"taskType"`
}

// ProjectTaskProperties returns the shared base fields of this task-properties variant.
func (s MigrateSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskProperties{}

// MarshalJSON implements json.Marshaler, forcing the "taskType" discriminator to
// "Migrate.SqlServer.AzureSqlDb.Sync".
func (s MigrateSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlDbSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err)
	}

	decoded["taskType"] = "Migrate.SqlServer.AzureSqlDb.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MigrateSqlServerSqlDbSyncTaskProperties{}

// UnmarshalJSON implements json.Unmarshaler. The plain fields are decoded directly;
// the polymorphic "commands" and "output" lists are re-read element-by-element via
// their discriminator-aware Unmarshal…Implementation helpers.
func (s *MigrateSqlServerSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"`
		ClientData *map[string]string                  `json:"clientData,omitempty"`
		Errors     *[]ODataError                       `json:"errors,omitempty"`
		State      *TaskState                          `json:"state,omitempty"`
		TaskType   TaskType                            `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]MigrateSqlServerSqlDbSyncTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}

// ---- file: model_migratesqlserversqldbtaskinput.go (new file) ----

package patch

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// MigrateSqlServerSqlDbTaskInput describes the input for an offline SQL Server ->
// Azure SQL DB migration task: the databases to move, source/target connections,
// and optional validation settings.
type MigrateSqlServerSqlDbTaskInput struct {
	EncryptedKeyForSecureFields *string                              `json:"encryptedKeyForSecureFields,omitempty"`
	SelectedDatabases           []MigrateSqlServerSqlDbDatabaseInput `json:"selectedDatabases"`
	SourceConnectionInfo        SqlConnectionInfo                    `json:"sourceConnectionInfo"`
	StartedOn                   *string                              `json:"startedOn,omitempty"`
	TargetConnectionInfo        SqlConnectionInfo                    `json:"targetConnectionInfo"`
	ValidationOptions           *MigrationValidationOptions          `json:"validationOptions,omitempty"`
}

// ---- file: model_migratesqlserversqldbtaskoutput.go (new file) ----

package patch

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// MigrateSqlServerSqlDbTaskOutput is the discriminated-union interface for the
// "resultType" hierarchy of SQL Server -> Azure SQL DB (offline) task outputs.
type MigrateSqlServerSqlDbTaskOutput interface {
	MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl
}

var _ MigrateSqlServerSqlDbTaskOutput = BaseMigrateSqlServerSqlDbTaskOutputImpl{}

// BaseMigrateSqlServerSqlDbTaskOutputImpl holds the fields shared by all
// implementations; ResultType is the JSON discriminator.
type BaseMigrateSqlServerSqlDbTaskOutputImpl struct {
	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbTaskOutput returns the receiver itself, since the base type
// is its own set of shared fields.
func (s BaseMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl {
	return s
}

var _ MigrateSqlServerSqlDbTaskOutput = RawMigrateSqlServerSqlDbTaskOutputImpl{}

// RawMigrateSqlServerSqlDbTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawMigrateSqlServerSqlDbTaskOutputImpl struct {
	migrateSqlServerSqlDbTaskOutput BaseMigrateSqlServerSqlDbTaskOutputImpl
	Type                            string
	Values                          map[string]interface{}
}

// MigrateSqlServerSqlDbTaskOutput returns the parsed base fields captured alongside
// the raw payload.
func (s RawMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl {
	return s.migrateSqlServerSqlDbTaskOutput
}

// UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation deserializes input into the
// concrete type selected by the (case-insensitive) "resultType" discriminator.
// Unknown or missing discriminators fall back to RawMigrateSqlServerSqlDbTaskOutputImpl.
func UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbTaskOutput, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutput into map[string]interface: %+v", err)
	}

	// The discriminator may be absent; value stays "" and falls through to the Raw type below.
	var value string
	if v, ok := temp["resultType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "DatabaseLevelOutput") {
		var out MigrateSqlServerSqlDbTaskOutputDatabaseLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "MigrationDatabaseLevelValidationOutput") {
		var out MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ErrorOutput") {
		var out MigrateSqlServerSqlDbTaskOutputError
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputError: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "MigrationLevelOutput") {
		var out MigrateSqlServerSqlDbTaskOutputMigrationLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "TableLevelOutput") {
		var out MigrateSqlServerSqlDbTaskOutputTableLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "MigrationValidationOutput") {
		var out MigrateSqlServerSqlDbTaskOutputValidationResult
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err)
		}
		return out, nil
	}

	// No known discriminator matched: keep the base fields plus the raw values.
	var parent BaseMigrateSqlServerSqlDbTaskOutputImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbTaskOutputImpl: %+v", err)
	}

	return RawMigrateSqlServerSqlDbTaskOutputImpl{
		migrateSqlServerSqlDbTaskOutput: parent,
		Type:                            value,
		Values:                          temp,
	}, nil

}

// ---- file: model_migratesqlserversqldbtaskoutputdatabaselevel.go (new file) ----

package patch

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{}

// MigrateSqlServerSqlDbTaskOutputDatabaseLevel is the "DatabaseLevelOutput" variant,
// reporting per-database offline-migration progress.
type MigrateSqlServerSqlDbTaskOutputDatabaseLevel struct {
	DatabaseName             *string                                     `json:"databaseName,omitempty"`
	EndedOn                  *string                                     `json:"endedOn,omitempty"`
	ErrorCount               *int64                                      `json:"errorCount,omitempty"`
	ErrorPrefix              *string                                     `json:"errorPrefix,omitempty"`
	ExceptionsAndWarnings    *[]ReportableException                      `json:"exceptionsAndWarnings,omitempty"`
	Message                  *string                                     `json:"message,omitempty"`
	NumberOfObjects          *int64                                      `json:"numberOfObjects,omitempty"`
	NumberOfObjectsCompleted *int64                                      `json:"numberOfObjectsCompleted,omitempty"`
	ObjectSummary            *map[string]DataItemMigrationSummaryResult  `json:"objectSummary,omitempty"`
	ResultPrefix             *string                                     `json:"resultPrefix,omitempty"`
	Stage                    *DatabaseMigrationStage                     `json:"stage,omitempty"`
	StartedOn                *string                                     `json:"startedOn,omitempty"`
	State                    *MigrationState                             `json:"state,omitempty"`
	StatusMessage            *string                                     `json:"statusMessage,omitempty"`

	// Fields inherited from MigrateSqlServerSqlDbTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbTaskOutput returns the shared base fields of this variant.
func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl {
	return BaseMigrateSqlServerSqlDbTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{}

// MarshalJSON implements json.Marshaler, forcing the "resultType" discriminator
// to "DatabaseLevelOutput".
func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err)
	}

	decoded["resultType"] = "DatabaseLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go (new file) ----

package patch

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{}

// MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult is the
// "MigrationDatabaseLevelValidationOutput" variant, carrying per-database
// post-migration validation results (data integrity, query analysis, schema).
type MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult struct {
	DataIntegrityValidationResult *DataIntegrityValidationResult    `json:"dataIntegrityValidationResult,omitempty"`
	EndedOn                       *string                           `json:"endedOn,omitempty"`
	MigrationId                   *string                           `json:"migrationId,omitempty"`
	QueryAnalysisValidationResult *QueryAnalysisValidationResult    `json:"queryAnalysisValidationResult,omitempty"`
	SchemaValidationResult        *SchemaComparisonValidationResult `json:"schemaValidationResult,omitempty"`
	SourceDatabaseName            *string                           `json:"sourceDatabaseName,omitempty"`
	StartedOn                     *string                           `json:"startedOn,omitempty"`
	Status                        *ValidationStatus                 `json:"status,omitempty"`
	TargetDatabaseName            *string                           `json:"targetDatabaseName,omitempty"`

	// Fields inherited from MigrateSqlServerSqlDbTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbTaskOutput returns the shared base fields of this variant.
func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl {
	return BaseMigrateSqlServerSqlDbTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{}

// MarshalJSON implements json.Marshaler, forcing the "resultType" discriminator
// to "MigrationDatabaseLevelValidationOutput".
func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err)
	}

	decoded["resultType"] = "MigrationDatabaseLevelValidationOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqldbtaskoutputerror.go (new file) ----

package patch

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputError{}

// MigrateSqlServerSqlDbTaskOutputError is the "ErrorOutput" variant of
// MigrateSqlServerSqlDbTaskOutput, carrying a single reportable exception.
type MigrateSqlServerSqlDbTaskOutputError struct {
	Error *ReportableException `json:"error,omitempty"`

	// Fields inherited from MigrateSqlServerSqlDbTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbTaskOutput returns the shared base fields of this variant.
func (s MigrateSqlServerSqlDbTaskOutputError) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl {
	return BaseMigrateSqlServerSqlDbTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputError{}

// MarshalJSON implements json.Marshaler, forcing the "resultType" discriminator
// to "ErrorOutput".
func (s MigrateSqlServerSqlDbTaskOutputError) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlDbTaskOutputError
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err)
	}

	decoded["resultType"] = "ErrorOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqldbtaskoutputmigrationlevel.go (new file) ----

package patch

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputMigrationLevel{}

// MigrateSqlServerSqlDbTaskOutputMigrationLevel is the "MigrationLevelOutput"
// variant, summarizing the overall offline migration including validation/report
// results and per-database summaries.
type MigrateSqlServerSqlDbTaskOutputMigrationLevel struct {
	DatabaseSummary           *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"`
	Databases                 *map[string]string                `json:"databases,omitempty"`
	DurationInSeconds         *int64                            `json:"durationInSeconds,omitempty"`
	EndedOn                   *string                           `json:"endedOn,omitempty"`
	ExceptionsAndWarnings     *[]ReportableException            `json:"exceptionsAndWarnings,omitempty"`
	Message                   *string                           `json:"message,omitempty"`
	MigrationReportResult     *MigrationReportResult            `json:"migrationReportResult,omitempty"`
	MigrationValidationResult *MigrationValidationResult        `json:"migrationValidationResult,omitempty"`
	SourceServerBrandVersion  *string                           `json:"sourceServerBrandVersion,omitempty"`
	SourceServerVersion       *string                           `json:"sourceServerVersion,omitempty"`
	StartedOn                 *string                           `json:"startedOn,omitempty"`
	Status                    *MigrationStatus                  `json:"status,omitempty"`
	StatusMessage             *string                           `json:"statusMessage,omitempty"`
	TargetServerBrandVersion  *string                           `json:"targetServerBrandVersion,omitempty"`
	TargetServerVersion       *string                           `json:"targetServerVersion,omitempty"`

	// Fields inherited from MigrateSqlServerSqlDbTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbTaskOutput returns the shared base fields of this variant.
func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl {
	return BaseMigrateSqlServerSqlDbTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputMigrationLevel{}

// MarshalJSON implements json.Marshaler, forcing the "resultType" discriminator
// to "MigrationLevelOutput".
func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlDbTaskOutputMigrationLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err)
	}

	decoded["resultType"] = "MigrationLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqldbtaskoutputtablelevel.go (new file) ----

package patch

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqldbtaskoutputvalidationresult.go 
b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqldbtaskoutputvalidationresult.go new file mode 100644 index 00000000000..64382afe9c7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqldbtaskoutputvalidationresult.go @@ -0,0 +1,54 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputValidationResult struct { + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationValidationOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return 
nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqldbtaskproperties.go new file mode 100644 index 00000000000..8a0f405da25 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqldbtaskproperties.go @@ -0,0 +1,130 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlDbTaskProperties{} + +type MigrateSqlServerSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlDbTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskProperties{} + +func (s MigrateSqlServerSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbTaskProperties{} + +func (s *MigrateSqlServerSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + 
if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmidatabaseinput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmidatabaseinput.go new file mode 100644 index 00000000000..a0172a1085d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmidatabaseinput.go @@ -0,0 +1,12 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMIDatabaseInput struct { + BackupFilePaths *[]string `json:"backupFilePaths,omitempty"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + Id *string `json:"id,omitempty"` + Name string `json:"name"` + RestoreDatabaseName string `json:"restoreDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..5fbdde06d1c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskinput.go @@ -0,0 +1,14 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + NumberOfParallelDatabaseMigrations *float64 `json:"numberOfParallelDatabaseMigrations,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..2d0de681419 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskoutput.go @@ -0,0 +1,92 @@ +package patch + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskOutput interface { + MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = BaseMigrateSqlServerSqlMISyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlMISyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = RawMigrateSqlServerSqlMISyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlMISyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlMISyncTaskOutputImpl struct { + migrateSqlServerSqlMISyncTaskOutput BaseMigrateSqlServerSqlMISyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s.migrateSqlServerSqlMISyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlMISyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMISyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMISyncTaskOutputImpl: %+v", err) + } + + return 
RawMigrateSqlServerSqlMISyncTaskOutputImpl{ + migrateSqlServerSqlMISyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..d666e7b4da1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go @@ -0,0 +1,62 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel struct { + ActiveBackupSets *[]BackupSetInfo `json:"activeBackupSets,omitempty"` + ContainerName *string `json:"containerName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FullBackupSetInfo *BackupSetInfo `json:"fullBackupSetInfo,omitempty"` + IsFullBackupRestored *bool `json:"isFullBackupRestored,omitempty"` + LastRestoredBackupSetInfo *BackupSetInfo `json:"lastRestoredBackupSetInfo,omitempty"` + MigrationState *DatabaseMigrationState `json:"migrationState,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return 
BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskoutputerror.go new file mode 100644 index 00000000000..3e27b5e809b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskoutputerror.go @@ -0,0 +1,52 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputError{} + +type MigrateSqlServerSqlMISyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputError{} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..1065a9443c6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go @@ -0,0 +1,62 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + DatabaseErrorCount *int64 `json:"databaseErrorCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerName *string `json:"sourceServerName,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerName *string `json:"targetServerName,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..a0d7008ff73 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmisynctaskproperties.go @@ -0,0 +1,124 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlMISyncTaskProperties{} + +type MigrateSqlServerSqlMISyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskProperties{} + +func (s MigrateSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMISyncTaskProperties{} + +func (s *MigrateSqlServerSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 
'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMISyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskinput.go new file mode 100644 index 00000000000..a4637d5ee36 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskinput.go @@ -0,0 +1,18 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMITaskInput struct { + AadDomainName *string `json:"aadDomainName,omitempty"` + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedAgentJobs *[]string `json:"selectedAgentJobs,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..8bcda7bd36d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutput.go @@ -0,0 +1,108 @@ +package patch + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMITaskOutput interface { + MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl +} + +var _ MigrateSqlServerSqlMITaskOutput = BaseMigrateSqlServerSqlMITaskOutputImpl{} + +type BaseMigrateSqlServerSqlMITaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMITaskOutput = RawMigrateSqlServerSqlMITaskOutputImpl{} + +// RawMigrateSqlServerSqlMITaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlMITaskOutputImpl struct { + migrateSqlServerSqlMITaskOutput BaseMigrateSqlServerSqlMITaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s.migrateSqlServerSqlMITaskOutput +} + +func UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(input []byte) (MigrateSqlServerSqlMITaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMITaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMITaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMITaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlMITaskOutputImpl{ + migrateSqlServerSqlMITaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputagentjoblevel.go new file mode 100644 index 00000000000..c6ff3e1a875 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +type MigrateSqlServerSqlMITaskOutputAgentJobLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + Message *string `json:"message,omitempty"` + Name *string `json:"name,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputdatabaselevel.go new file mode 100644 index 00000000000..59ae862b2f8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputdatabaselevel.go @@ -0,0 +1,59 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMITaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, 
fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputerror.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputerror.go new file mode 100644 index 00000000000..728a499dd38 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputerror.go @@ -0,0 +1,52 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputError{} + +type MigrateSqlServerSqlMITaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputError) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputError{} + +func (s MigrateSqlServerSqlMITaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling 
MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputloginlevel.go new file mode 100644 index 00000000000..f61f7f3e445 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputloginlevel.go @@ -0,0 +1,58 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +type MigrateSqlServerSqlMITaskOutputLoginLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LoginName *string `json:"loginName,omitempty"` + Message *string `json:"message,omitempty"` + Stage *LoginMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputLoginLevel + 
wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputmigrationlevel.go new file mode 100644 index 00000000000..05f103b6eac --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMITaskOutputMigrationLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + Message *string `json:"message,omitempty"` + OrphanedUsersInfo *[]OrphanedUserInfo `json:"orphanedUsersInfo,omitempty"` + ServerRoleResults *map[string]StartMigrationScenarioServerRoleResult `json:"serverRoleResults,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); 
err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..a29201c953b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesqlserversqlmitaskproperties.go @@ -0,0 +1,133 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlMITaskProperties{} + +type MigrateSqlServerSqlMITaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlMITaskOutput `json:"output,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ 
json.Marshaler = MigrateSqlServerSqlMITaskProperties{} + +func (s MigrateSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMITaskProperties{} + +func (s *MigrateSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.ParentTaskId = decoded.ParentTaskId + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties into 
map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMITaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskinput.go new file mode 100644 index 00000000000..7288ed348f8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskinput.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSsisTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + SsisMigrationInfo SsisMigrationInfo `json:"ssisMigrationInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskoutput.go new file mode 100644 index 00000000000..31a57b60b7a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskoutput.go @@ -0,0 +1,84 @@ +package patch + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSsisTaskOutput interface { + MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl +} + +var _ MigrateSsisTaskOutput = BaseMigrateSsisTaskOutputImpl{} + +type BaseMigrateSsisTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return s +} + +var _ MigrateSsisTaskOutput = RawMigrateSsisTaskOutputImpl{} + +// RawMigrateSsisTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSsisTaskOutputImpl struct { + migrateSsisTaskOutput BaseMigrateSsisTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return s.migrateSsisTaskOutput +} + +func UnmarshalMigrateSsisTaskOutputImplementation(input []byte) (MigrateSsisTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSsisTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SsisProjectLevelOutput") { + var out MigrateSsisTaskOutputProjectLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputProjectLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSsisTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSsisTaskOutputImpl: %+v", err) + } + + return RawMigrateSsisTaskOutputImpl{ + migrateSsisTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskoutputmigrationlevel.go new file mode 100644 index 00000000000..0dc5936de84 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package patch + +import ( + "encoding/json" 
+ "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputMigrationLevel{} + +type MigrateSsisTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputMigrationLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputMigrationLevel{} + +func (s MigrateSsisTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + 
} + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskoutputprojectlevel.go b/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskoutputprojectlevel.go new file mode 100644 index 00000000000..31433067e68 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskoutputprojectlevel.go @@ -0,0 +1,59 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputProjectLevel{} + +type MigrateSsisTaskOutputProjectLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FolderName *string `json:"folderName,omitempty"` + Message *string `json:"message,omitempty"` + ProjectName *string `json:"projectName,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputProjectLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputProjectLevel{} + +func (s MigrateSsisTaskOutputProjectLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputProjectLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling 
MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + decoded["resultType"] = "SsisProjectLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskproperties.go new file mode 100644 index 00000000000..bfdf998fea5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratessistaskproperties.go @@ -0,0 +1,121 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSsisTaskProperties{} + +type MigrateSsisTaskProperties struct { + Input *MigrateSsisTaskInput `json:"input,omitempty"` + Output *[]MigrateSsisTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSsisTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSsisTaskProperties{} + +func (s MigrateSsisTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + 
if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Ssis" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSsisTaskProperties{} + +func (s *MigrateSsisTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateSsisTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSsisTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := 
make([]MigrateSsisTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSsisTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesynccompletecommandinput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesynccompletecommandinput.go new file mode 100644 index 00000000000..5f73e610da6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesynccompletecommandinput.go @@ -0,0 +1,27 @@ +package patch + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSyncCompleteCommandInput struct { + CommitTimeStamp *string `json:"commitTimeStamp,omitempty"` + DatabaseName string `json:"databaseName"` +} + +func (o *MigrateSyncCompleteCommandInput) GetCommitTimeStampAsTime() (*time.Time, error) { + if o.CommitTimeStamp == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CommitTimeStamp, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateSyncCompleteCommandInput) SetCommitTimeStampAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CommitTimeStamp = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesynccompletecommandoutput.go new file mode 100644 index 00000000000..829b416a987 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesynccompletecommandoutput.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` + Id *string `json:"id,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migratesynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/patch/model_migratesynccompletecommandproperties.go new file mode 100644 index 00000000000..900d8c9578a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migratesynccompletecommandproperties.go @@ -0,0 +1,56 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ CommandProperties = MigrateSyncCompleteCommandProperties{} + +type MigrateSyncCompleteCommandProperties struct { + CommandId *string `json:"commandId,omitempty"` + Input *MigrateSyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateSyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateSyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateSyncCompleteCommandProperties{} + +func (s MigrateSyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, 
&decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.Sync.Complete.Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migrationeligibilityinfo.go b/resource-manager/datamigration/2025-06-30/patch/model_migrationeligibilityinfo.go new file mode 100644 index 00000000000..1869914d266 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migrationeligibilityinfo.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationEligibilityInfo struct { + IsEligibleForMigration *bool `json:"isEligibleForMigration,omitempty"` + ValidationMessages *[]string `json:"validationMessages,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migrationreportresult.go b/resource-manager/datamigration/2025-06-30/patch/model_migrationreportresult.go new file mode 100644 index 00000000000..cc182833fe6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migrationreportresult.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationReportResult struct { + Id *string `json:"id,omitempty"` + ReportURL *string `json:"reportUrl,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migrationvalidationdatabasesummaryresult.go b/resource-manager/datamigration/2025-06-30/patch/model_migrationvalidationdatabasesummaryresult.go new file mode 100644 index 00000000000..2b58c1a48b6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migrationvalidationdatabasesummaryresult.go @@ -0,0 +1,44 @@ +package patch + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationValidationDatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} + +func (o *MigrationValidationDatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *MigrationValidationDatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff 
--git a/resource-manager/datamigration/2025-06-30/patch/model_migrationvalidationoptions.go b/resource-manager/datamigration/2025-06-30/patch/model_migrationvalidationoptions.go new file mode 100644 index 00000000000..e48f3bac167 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migrationvalidationoptions.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationValidationOptions struct { + EnableDataIntegrityValidation *bool `json:"enableDataIntegrityValidation,omitempty"` + EnableQueryAnalysisValidation *bool `json:"enableQueryAnalysisValidation,omitempty"` + EnableSchemaValidation *bool `json:"enableSchemaValidation,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_migrationvalidationresult.go b/resource-manager/datamigration/2025-06-30/patch/model_migrationvalidationresult.go new file mode 100644 index 00000000000..a5ca5ae7f4c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_migrationvalidationresult.go @@ -0,0 +1,11 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationValidationResult struct { + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_misqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/patch/model_misqlconnectioninfo.go new file mode 100644 index 00000000000..56beb094859 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_misqlconnectioninfo.go @@ -0,0 +1,54 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MiSqlConnectionInfo{} + +type MiSqlConnectionInfo struct { + ManagedInstanceResourceId string `json:"managedInstanceResourceId"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MiSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MiSqlConnectionInfo{} + +func (s MiSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MiSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MiSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MiSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MiSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MiSqlConnectionInfo: %+v", err) + } 
+ + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_mongodbclusterinfo.go b/resource-manager/datamigration/2025-06-30/patch/model_mongodbclusterinfo.go new file mode 100644 index 00000000000..b352bf88412 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_mongodbclusterinfo.go @@ -0,0 +1,11 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbClusterInfo struct { + Databases []MongoDbDatabaseInfo `json:"databases"` + SupportsSharding bool `json:"supportsSharding"` + Type MongoDbClusterType `json:"type"` + Version string `json:"version"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_mongodbcollectioninfo.go b/resource-manager/datamigration/2025-06-30/patch/model_mongodbcollectioninfo.go new file mode 100644 index 00000000000..4e9e72b1289 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_mongodbcollectioninfo.go @@ -0,0 +1,19 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + DataSize int64 `json:"dataSize"` + DatabaseName string `json:"databaseName"` + DocumentCount int64 `json:"documentCount"` + IsCapped bool `json:"isCapped"` + IsSystemCollection bool `json:"isSystemCollection"` + IsView bool `json:"isView"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + ShardKey *MongoDbShardKeyInfo `json:"shardKey,omitempty"` + SupportsSharding bool `json:"supportsSharding"` + ViewOf *string `json:"viewOf,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_mongodbcollectionprogress.go b/resource-manager/datamigration/2025-06-30/patch/model_mongodbcollectionprogress.go new file mode 100644 index 00000000000..926327faaf3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_mongodbcollectionprogress.go @@ -0,0 +1,102 @@ +package patch + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbCollectionProgress{} + +type MongoDbCollectionProgress struct { + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbCollectionProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbCollectionProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbCollectionProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) 
SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbCollectionProgress{} + +func (s MongoDbCollectionProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbCollectionProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbCollectionProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbCollectionProgress: %+v", err) + } + + decoded["resultType"] = "Collection" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbCollectionProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_mongodbcollectionsettings.go b/resource-manager/datamigration/2025-06-30/patch/model_mongodbcollectionsettings.go new file mode 100644 index 00000000000..b252bdef98b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_mongodbcollectionsettings.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionSettings struct { + CanDelete *bool `json:"canDelete,omitempty"` + ShardKey *MongoDbShardKeySetting `json:"shardKey,omitempty"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_mongodbconnectioninfo.go b/resource-manager/datamigration/2025-06-30/patch/model_mongodbconnectioninfo.go new file mode 100644 index 00000000000..15d26059cff --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_mongodbconnectioninfo.go @@ -0,0 +1,64 @@ +package patch + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MongoDbConnectionInfo{} + +type MongoDbConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + ConnectionString string `json:"connectionString"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + EnforceSSL *bool `json:"enforceSSL,omitempty"` + Port *int64 `json:"port,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MongoDbConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MongoDbConnectionInfo{} + +func (s MongoDbConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MongoDbConnectionInfo + 
wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbConnectionInfo: %+v", err) + } + + decoded["type"] = "MongoDbConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_mongodbdatabaseinfo.go b/resource-manager/datamigration/2025-06-30/patch/model_mongodbdatabaseinfo.go new file mode 100644 index 00000000000..21f37c6394f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_mongodbdatabaseinfo.go @@ -0,0 +1,14 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbDatabaseInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + Collections []MongoDbCollectionInfo `json:"collections"` + DataSize int64 `json:"dataSize"` + DocumentCount int64 `json:"documentCount"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + SupportsSharding bool `json:"supportsSharding"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_mongodbdatabaseprogress.go b/resource-manager/datamigration/2025-06-30/patch/model_mongodbdatabaseprogress.go new file mode 100644 index 00000000000..ae31a32aae3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_mongodbdatabaseprogress.go @@ -0,0 +1,166 @@ +package patch + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
var _ MongoDbProgress = MongoDbDatabaseProgress{}

// MongoDbDatabaseProgress is the MongoDbProgress implementation for a database
// (discriminator resultType = "Database"), adding per-collection progress
// keyed by collection.
type MongoDbDatabaseProgress struct {
	Collections *map[string]MongoDbProgress `json:"collections,omitempty"`

	// Fields inherited from MongoDbProgress

	BytesCopied     int64                   `json:"bytesCopied"`
	DocumentsCopied int64                   `json:"documentsCopied"`
	ElapsedTime     string                  `json:"elapsedTime"`
	Errors          map[string]MongoDbError `json:"errors"`
	EventsPending   int64                   `json:"eventsPending"`
	EventsReplayed  int64                   `json:"eventsReplayed"`
	LastEventTime   *string                 `json:"lastEventTime,omitempty"`
	LastReplayTime  *string                 `json:"lastReplayTime,omitempty"`
	Name            *string                 `json:"name,omitempty"`
	QualifiedName   *string                 `json:"qualifiedName,omitempty"`
	ResultType      ResultType              `json:"resultType"`
	State           MongoDbMigrationState   `json:"state"`
	TotalBytes      int64                   `json:"totalBytes"`
	TotalDocuments  int64                   `json:"totalDocuments"`
}

// MongoDbProgress returns the embedded base-type fields.
func (s MongoDbDatabaseProgress) MongoDbProgress() BaseMongoDbProgressImpl {
	return BaseMongoDbProgressImpl{
		BytesCopied:     s.BytesCopied,
		DocumentsCopied: s.DocumentsCopied,
		ElapsedTime:     s.ElapsedTime,
		Errors:          s.Errors,
		EventsPending:   s.EventsPending,
		EventsReplayed:  s.EventsReplayed,
		LastEventTime:   s.LastEventTime,
		LastReplayTime:  s.LastReplayTime,
		Name:            s.Name,
		QualifiedName:   s.QualifiedName,
		ResultType:      s.ResultType,
		State:           s.State,
		TotalBytes:      s.TotalBytes,
		TotalDocuments:  s.TotalDocuments,
	}
}

// GetLastEventTimeAsTime parses LastEventTime as RFC 3339; returns nil (no
// error) when the field is unset.
func (o *MongoDbDatabaseProgress) GetLastEventTimeAsTime() (*time.Time, error) {
	if o.LastEventTime == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00")
}

// SetLastEventTimeAsTime stores input formatted as RFC 3339.
func (o *MongoDbDatabaseProgress) SetLastEventTimeAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.LastEventTime = &formatted
}

// GetLastReplayTimeAsTime parses LastReplayTime as RFC 3339; returns nil (no
// error) when the field is unset.
func (o *MongoDbDatabaseProgress) GetLastReplayTimeAsTime() (*time.Time, error) {
	if o.LastReplayTime == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00")
}

// SetLastReplayTimeAsTime stores input formatted as RFC 3339.
func (o *MongoDbDatabaseProgress) SetLastReplayTimeAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.LastReplayTime = &formatted
}

var _ json.Marshaler = MongoDbDatabaseProgress{}

// MarshalJSON emits the struct with the discriminator field forced to
// "Database".
func (s MongoDbDatabaseProgress) MarshalJSON() ([]byte, error) {
	type wrapper MongoDbDatabaseProgress // alias avoids re-entering this method
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MongoDbDatabaseProgress: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MongoDbDatabaseProgress: %+v", err)
	}

	// Stamp the discriminator.
	decoded["resultType"] = "Database"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MongoDbDatabaseProgress: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MongoDbDatabaseProgress{}

// UnmarshalJSON decodes the flat fields directly; "collections" holds
// discriminated MongoDbProgress values, so each entry is re-parsed through
// UnmarshalMongoDbProgressImplementation to recover its concrete type.
func (s *MongoDbDatabaseProgress) UnmarshalJSON(bytes []byte) error {
	// First pass: the non-polymorphic fields.
	var decoded struct {
		BytesCopied     int64                   `json:"bytesCopied"`
		DocumentsCopied int64                   `json:"documentsCopied"`
		ElapsedTime     string                  `json:"elapsedTime"`
		Errors          map[string]MongoDbError `json:"errors"`
		EventsPending   int64                   `json:"eventsPending"`
		EventsReplayed  int64                   `json:"eventsReplayed"`
		LastEventTime   *string                 `json:"lastEventTime,omitempty"`
		LastReplayTime  *string                 `json:"lastReplayTime,omitempty"`
		Name            *string                 `json:"name,omitempty"`
		QualifiedName   *string                 `json:"qualifiedName,omitempty"`
		ResultType      ResultType              `json:"resultType"`
		State           MongoDbMigrationState   `json:"state"`
		TotalBytes      int64                   `json:"totalBytes"`
		TotalDocuments  int64                   `json:"totalDocuments"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.BytesCopied = decoded.BytesCopied
	s.DocumentsCopied = decoded.DocumentsCopied
	s.ElapsedTime = decoded.ElapsedTime
	s.Errors = decoded.Errors
	s.EventsPending = decoded.EventsPending
	s.EventsReplayed = decoded.EventsReplayed
	s.LastEventTime = decoded.LastEventTime
	s.LastReplayTime = decoded.LastReplayTime
	s.Name = decoded.Name
	s.QualifiedName = decoded.QualifiedName
	s.ResultType = decoded.ResultType
	s.State = decoded.State
	s.TotalBytes = decoded.TotalBytes
	s.TotalDocuments = decoded.TotalDocuments

	// Second pass: keep "collections" raw for per-entry discriminator-based
	// decoding.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MongoDbDatabaseProgress into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["collections"]; ok {
		var dictionaryTemp map[string]json.RawMessage
		if err := json.Unmarshal(v, &dictionaryTemp); err != nil {
			return fmt.Errorf("unmarshaling Collections into dictionary map[string]json.RawMessage: %+v", err)
		}

		output := make(map[string]MongoDbProgress)
		for key, val := range dictionaryTemp {
			impl, err := UnmarshalMongoDbProgressImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling key %q field 'Collections' for 'MongoDbDatabaseProgress': %+v", key, err)
			}
			output[key] = impl
		}
		s.Collections = &output
	}

	return nil
}
+ +type MongoDbDatabaseSettings struct { + Collections map[string]MongoDbCollectionSettings `json:"collections"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_mongodberror.go b/resource-manager/datamigration/2025-06-30/patch/model_mongodberror.go new file mode 100644 index 00000000000..0ec0c180922 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_mongodberror.go @@ -0,0 +1,11 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbError struct { + Code *string `json:"code,omitempty"` + Count *int64 `json:"count,omitempty"` + Message *string `json:"message,omitempty"` + Type *MongoDbErrorType `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_mongodbmigrationprogress.go b/resource-manager/datamigration/2025-06-30/patch/model_mongodbmigrationprogress.go new file mode 100644 index 00000000000..17c341ac8d7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_mongodbmigrationprogress.go @@ -0,0 +1,103 @@ +package patch + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbMigrationProgress{} + +type MongoDbMigrationProgress struct { + Databases *map[string]MongoDbDatabaseProgress `json:"databases,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbMigrationProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbMigrationProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbMigrationProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbMigrationProgress{} + +func (s MongoDbMigrationProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbMigrationProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbMigrationProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbMigrationProgress: %+v", err) + } + + decoded["resultType"] = "Migration" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbMigrationProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_mongodbmigrationsettings.go b/resource-manager/datamigration/2025-06-30/patch/model_mongodbmigrationsettings.go new file mode 100644 index 00000000000..7ae50968ff7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_mongodbmigrationsettings.go @@ -0,0 +1,13 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbMigrationSettings struct { + BoostRUs *int64 `json:"boostRUs,omitempty"` + Databases map[string]MongoDbDatabaseSettings `json:"databases"` + Replication *MongoDbReplication `json:"replication,omitempty"` + Source MongoDbConnectionInfo `json:"source"` + Target MongoDbConnectionInfo `json:"target"` + Throttling *MongoDbThrottlingSettings `json:"throttling,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_mongodbprogress.go b/resource-manager/datamigration/2025-06-30/patch/model_mongodbprogress.go new file mode 100644 index 00000000000..00313435a1f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_mongodbprogress.go @@ -0,0 +1,104 @@ +package patch + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbProgress interface { + MongoDbProgress() BaseMongoDbProgressImpl +} + +var _ MongoDbProgress = BaseMongoDbProgressImpl{} + +type BaseMongoDbProgressImpl struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s BaseMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s +} + +var _ MongoDbProgress = RawMongoDbProgressImpl{} + +// RawMongoDbProgressImpl is returned when the Discriminated Value 
doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMongoDbProgressImpl struct { + mongoDbProgress BaseMongoDbProgressImpl + Type string + Values map[string]interface{} +} + +func (s RawMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s.mongoDbProgress +} + +func UnmarshalMongoDbProgressImplementation(input []byte) (MongoDbProgress, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbProgress into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Collection") { + var out MongoDbCollectionProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbCollectionProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Database") { + var out MongoDbDatabaseProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbDatabaseProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migration") { + var out MongoDbMigrationProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbMigrationProgress: %+v", err) + } + return out, nil + } + + var parent BaseMongoDbProgressImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMongoDbProgressImpl: %+v", err) + } + + return RawMongoDbProgressImpl{ + mongoDbProgress: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_mongodbshardkeyfield.go 
package patch

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// MongoDbShardKeyField identifies one field of a shard key together with its sort order.
type MongoDbShardKeyField struct {
	Name  string               `json:"name"`
	Order MongoDbShardKeyOrder `json:"order"`
}

package patch

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// MongoDbShardKeyInfo describes a shard key; both fields are required in the payload.
// NOTE(review): differs from MongoDbShardKeySetting only in IsUnique being
// required here — presumably the read/response shape; confirm against the API spec.
type MongoDbShardKeyInfo struct {
	Fields   []MongoDbShardKeyField `json:"fields"`
	IsUnique bool                   `json:"isUnique"`
}

package patch

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// MongoDbShardKeySetting describes a shard key where uniqueness is optional
// (contrast with MongoDbShardKeyInfo, where IsUnique is required).
type MongoDbShardKeySetting struct {
	Fields   []MongoDbShardKeyField `json:"fields"`
	IsUnique *bool                  `json:"isUnique,omitempty"`
}

package patch

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// MongoDbThrottlingSettings caps the resources a migration may consume on the host.
type MongoDbThrottlingSettings struct {
	MaxParallelism *int64 `json:"maxParallelism,omitempty"`
	// MinFreeCPU serializes as "minFreeCpu" (generator casing differs from the Go name).
	MinFreeCPU      *int64 `json:"minFreeCpu,omitempty"`
	MinFreeMemoryMb *int64 `json:"minFreeMemoryMb,omitempty"`
}

package patch

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
var _ ConnectionInfo = MySqlConnectionInfo{}

// MySqlConnectionInfo is the ConnectionInfo variant for MySQL sources/targets;
// its discriminator value is "MySqlConnectionInfo" (injected by MarshalJSON).
type MySqlConnectionInfo struct {
	AdditionalSettings *string             `json:"additionalSettings,omitempty"`
	Authentication     *AuthenticationType `json:"authentication,omitempty"`
	DataSource         *string             `json:"dataSource,omitempty"`
	EncryptConnection  *bool               `json:"encryptConnection,omitempty"`
	Port               int64               `json:"port"`
	ServerName         string              `json:"serverName"`

	// Fields inherited from ConnectionInfo

	Password *string `json:"password,omitempty"`
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

// ConnectionInfo returns just the inherited base fields, satisfying the
// ConnectionInfo interface.
func (s MySqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
	return BaseConnectionInfoImpl{
		Password: s.Password,
		Type:     s.Type,
		UserName: s.UserName,
	}
}

var _ json.Marshaler = MySqlConnectionInfo{}

// MarshalJSON serializes the struct and then forces the "type" discriminator to
// "MySqlConnectionInfo", regardless of what the Type field held.
func (s MySqlConnectionInfo) MarshalJSON() ([]byte, error) {
	// The wrapper type drops this MarshalJSON method so the inner Marshal
	// doesn't recurse.
	type wrapper MySqlConnectionInfo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MySqlConnectionInfo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MySqlConnectionInfo: %+v", err)
	}

	decoded["type"] = "MySqlConnectionInfo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MySqlConnectionInfo: %+v", err)
	}

	return encoded, nil
}

package patch

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// ODataError is the service's recursive error shape; Details may nest further errors.
type ODataError struct {
	Code    *string       `json:"code,omitempty"`
	Details *[]ODataError `json:"details,omitempty"`
	Message *string       `json:"message,omitempty"`
}

package patch

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ ConnectionInfo = OracleConnectionInfo{}

// OracleConnectionInfo is the ConnectionInfo variant for Oracle sources; its
// discriminator value is "OracleConnectionInfo" (injected by MarshalJSON).
// Note DataSource is required here while ServerName/Port are optional.
type OracleConnectionInfo struct {
	Authentication *AuthenticationType `json:"authentication,omitempty"`
	DataSource     string              `json:"dataSource"`
	Port           *int64              `json:"port,omitempty"`
	ServerName     *string             `json:"serverName,omitempty"`
	ServerVersion  *string             `json:"serverVersion,omitempty"`

	// Fields inherited from ConnectionInfo

	Password *string `json:"password,omitempty"`
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

// ConnectionInfo returns just the inherited base fields, satisfying the
// ConnectionInfo interface.
func (s OracleConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
	return BaseConnectionInfoImpl{
		Password: s.Password,
		Type:     s.Type,
		UserName: s.UserName,
	}
}

var _ json.Marshaler = OracleConnectionInfo{}

// MarshalJSON serializes the struct and then forces the "type" discriminator to
// "OracleConnectionInfo".
func (s OracleConnectionInfo) MarshalJSON() ([]byte, error) {
	// The wrapper type drops this MarshalJSON method so the inner Marshal
	// doesn't recurse.
	type wrapper OracleConnectionInfo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling OracleConnectionInfo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling OracleConnectionInfo: %+v", err)
	}

	decoded["type"] = "OracleConnectionInfo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling OracleConnectionInfo: %+v", err)
	}

	return encoded, nil
}

package patch

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// OrphanedUserInfo identifies a database user with no matching login.
type OrphanedUserInfo struct {
	DatabaseName *string `json:"databaseName,omitempty"`
	Name         *string `json:"name,omitempty"`
}

package patch

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
var _ ConnectionInfo = PostgreSqlConnectionInfo{}

// PostgreSqlConnectionInfo is the ConnectionInfo variant for PostgreSQL
// sources/targets; its discriminator value is "PostgreSqlConnectionInfo"
// (injected by MarshalJSON). Port and ServerName are required.
type PostgreSqlConnectionInfo struct {
	AdditionalSettings     *string             `json:"additionalSettings,omitempty"`
	Authentication         *AuthenticationType `json:"authentication,omitempty"`
	DataSource             *string             `json:"dataSource,omitempty"`
	DatabaseName           *string             `json:"databaseName,omitempty"`
	EncryptConnection      *bool               `json:"encryptConnection,omitempty"`
	Port                   int64               `json:"port"`
	ServerBrandVersion     *string             `json:"serverBrandVersion,omitempty"`
	ServerName             string              `json:"serverName"`
	ServerVersion          *string             `json:"serverVersion,omitempty"`
	TrustServerCertificate *bool               `json:"trustServerCertificate,omitempty"`

	// Fields inherited from ConnectionInfo

	Password *string `json:"password,omitempty"`
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

// ConnectionInfo returns just the inherited base fields, satisfying the
// ConnectionInfo interface.
func (s PostgreSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
	return BaseConnectionInfoImpl{
		Password: s.Password,
		Type:     s.Type,
		UserName: s.UserName,
	}
}

var _ json.Marshaler = PostgreSqlConnectionInfo{}

// MarshalJSON serializes the struct and then forces the "type" discriminator to
// "PostgreSqlConnectionInfo".
func (s PostgreSqlConnectionInfo) MarshalJSON() ([]byte, error) {
	// The wrapper type drops this MarshalJSON method so the inner Marshal
	// doesn't recurse.
	type wrapper PostgreSqlConnectionInfo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling PostgreSqlConnectionInfo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling PostgreSqlConnectionInfo: %+v", err)
	}

	decoded["type"] = "PostgreSqlConnectionInfo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling PostgreSqlConnectionInfo: %+v", err)
	}

	return encoded, nil
}
package patch

import (
	"github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// Project is the ARM resource envelope for a Database Migration Service project;
// the migration-specific settings live in Properties.
type Project struct {
	Etag       *string                `json:"etag,omitempty"`
	Id         *string                `json:"id,omitempty"`
	Location   *string                `json:"location,omitempty"`
	Name       *string                `json:"name,omitempty"`
	Properties *ProjectProperties     `json:"properties,omitempty"`
	SystemData *systemdata.SystemData `json:"systemData,omitempty"`
	Tags       *map[string]string     `json:"tags,omitempty"`
	Type       *string                `json:"type,omitempty"`
}

package patch

import (
	"github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// ProjectFile is the ARM resource envelope for a file attached to a project.
type ProjectFile struct {
	Etag       *string                `json:"etag,omitempty"`
	Id         *string                `json:"id,omitempty"`
	Name       *string                `json:"name,omitempty"`
	Properties *ProjectFileProperties `json:"properties,omitempty"`
	SystemData *systemdata.SystemData `json:"systemData,omitempty"`
	Type       *string                `json:"type,omitempty"`
}

package patch

import (
	"time"

	"github.com/hashicorp/go-azure-helpers/lang/dates"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ProjectFileProperties describes an uploaded project file; LastModified is an
// RFC 3339 timestamp string (use the As-Time helpers below).
type ProjectFileProperties struct {
	Extension    *string `json:"extension,omitempty"`
	FilePath     *string `json:"filePath,omitempty"`
	LastModified *string `json:"lastModified,omitempty"`
	MediaType    *string `json:"mediaType,omitempty"`
	Size         *int64  `json:"size,omitempty"`
}

// GetLastModifiedAsTime parses LastModified as RFC 3339; returns (nil, nil)
// when the field is unset.
func (o *ProjectFileProperties) GetLastModifiedAsTime() (*time.Time, error) {
	if o.LastModified == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.LastModified, "2006-01-02T15:04:05Z07:00")
}

// SetLastModifiedAsTime stores input in LastModified as an RFC 3339 string.
func (o *ProjectFileProperties) SetLastModifiedAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.LastModified = &formatted
}

package patch

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/hashicorp/go-azure-helpers/lang/dates"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ProjectProperties holds the migration configuration of a project. The two
// connection-info fields are discriminated unions, which is why this type
// needs the custom UnmarshalJSON below.
type ProjectProperties struct {
	AzureAuthenticationInfo *AzureActiveDirectoryApp  `json:"azureAuthenticationInfo,omitempty"`
	CreationTime            *string                   `json:"creationTime,omitempty"`
	DatabasesInfo           *[]DatabaseInfo           `json:"databasesInfo,omitempty"`
	ProvisioningState       *ProjectProvisioningState `json:"provisioningState,omitempty"`
	SourceConnectionInfo    ConnectionInfo            `json:"sourceConnectionInfo"`
	SourcePlatform          ProjectSourcePlatform     `json:"sourcePlatform"`
	TargetConnectionInfo    ConnectionInfo            `json:"targetConnectionInfo"`
	TargetPlatform          ProjectTargetPlatform     `json:"targetPlatform"`
}

// GetCreationTimeAsTime parses CreationTime as RFC 3339; returns (nil, nil)
// when the field is unset.
func (o *ProjectProperties) GetCreationTimeAsTime() (*time.Time, error) {
	if o.CreationTime == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.CreationTime, "2006-01-02T15:04:05Z07:00")
}

// SetCreationTimeAsTime stores input in CreationTime as an RFC 3339 string.
func (o *ProjectProperties) SetCreationTimeAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.CreationTime = &formatted
}

var _ json.Unmarshaler = &ProjectProperties{}

// UnmarshalJSON decodes the plain fields directly, then decodes the two
// ConnectionInfo union fields via UnmarshalConnectionInfoImplementation so the
// correct concrete type is chosen from each payload's discriminator.
func (s *ProjectProperties) UnmarshalJSON(bytes []byte) error {
	// First pass: everything except the union-typed fields.
	var decoded struct {
		AzureAuthenticationInfo *AzureActiveDirectoryApp  `json:"azureAuthenticationInfo,omitempty"`
		CreationTime            *string                   `json:"creationTime,omitempty"`
		DatabasesInfo           *[]DatabaseInfo           `json:"databasesInfo,omitempty"`
		ProvisioningState       *ProjectProvisioningState `json:"provisioningState,omitempty"`
		SourcePlatform          ProjectSourcePlatform     `json:"sourcePlatform"`
		TargetPlatform          ProjectTargetPlatform     `json:"targetPlatform"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.AzureAuthenticationInfo = decoded.AzureAuthenticationInfo
	s.CreationTime = decoded.CreationTime
	s.DatabasesInfo = decoded.DatabasesInfo
	s.ProvisioningState = decoded.ProvisioningState
	s.SourcePlatform = decoded.SourcePlatform
	s.TargetPlatform = decoded.TargetPlatform

	// Second pass: pull the raw bytes of each union field and dispatch on its
	// discriminator.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ProjectProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["sourceConnectionInfo"]; ok {
		impl, err := UnmarshalConnectionInfoImplementation(v)
		if err != nil {
			return fmt.Errorf("unmarshaling field 'SourceConnectionInfo' for 'ProjectProperties': %+v", err)
		}
		s.SourceConnectionInfo = impl
	}

	if v, ok := temp["targetConnectionInfo"]; ok {
		impl, err := UnmarshalConnectionInfoImplementation(v)
		if err != nil {
			return fmt.Errorf("unmarshaling field 'TargetConnectionInfo' for 'ProjectProperties': %+v", err)
		}
		s.TargetConnectionInfo = impl
	}

	return nil
}

package patch

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// ProjectTask is the ARM resource envelope for a task; Properties is a
// discriminated union (ProjectTaskProperties), hence the custom UnmarshalJSON.
type ProjectTask struct {
	Etag       *string                `json:"etag,omitempty"`
	Id         *string                `json:"id,omitempty"`
	Name       *string                `json:"name,omitempty"`
	Properties ProjectTaskProperties  `json:"properties"`
	SystemData *systemdata.SystemData `json:"systemData,omitempty"`
	Type       *string                `json:"type,omitempty"`
}

var _ json.Unmarshaler = &ProjectTask{}

// UnmarshalJSON decodes the plain envelope fields directly, then decodes
// Properties via UnmarshalProjectTaskPropertiesImplementation so the concrete
// task type is chosen from the payload's "taskType" discriminator.
func (s *ProjectTask) UnmarshalJSON(bytes []byte) error {
	// First pass: everything except the union-typed Properties field.
	var decoded struct {
		Etag       *string                `json:"etag,omitempty"`
		Id         *string                `json:"id,omitempty"`
		Name       *string                `json:"name,omitempty"`
		SystemData *systemdata.SystemData `json:"systemData,omitempty"`
		Type       *string                `json:"type,omitempty"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Etag = decoded.Etag
	s.Id = decoded.Id
	s.Name = decoded.Name
	s.SystemData = decoded.SystemData
	s.Type = decoded.Type

	// Second pass: dispatch the raw "properties" bytes on their discriminator.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ProjectTask into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["properties"]; ok {
		impl, err := UnmarshalProjectTaskPropertiesImplementation(v)
		if err != nil {
			return fmt.Errorf("unmarshaling field 'Properties' for 'ProjectTask': %+v", err)
		}
		s.Properties = impl
	}

	return nil
}

package patch

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// ProjectTaskProperties is the discriminated-union interface for every task
// properties payload; the concrete variant is selected by the "taskType" field.
type ProjectTaskProperties interface {
	ProjectTaskProperties() BaseProjectTaskPropertiesImpl
}

var _ ProjectTaskProperties = BaseProjectTaskPropertiesImpl{}

// BaseProjectTaskPropertiesImpl carries the fields shared by every task type.
// Commands is itself a slice of a discriminated union (CommandProperties),
// which is why this type needs the custom UnmarshalJSON below.
type BaseProjectTaskPropertiesImpl struct {
	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base fields, satisfying the interface.
func (s BaseProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return s
}

var _ ProjectTaskProperties = RawProjectTaskPropertiesImpl{}

// RawProjectTaskPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawProjectTaskPropertiesImpl struct {
	projectTaskProperties BaseProjectTaskPropertiesImpl
	Type                  string
	Values                map[string]interface{}
}

// ProjectTaskProperties returns the base fields parsed from the unrecognised payload.
func (s RawProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return s.projectTaskProperties
}

var _ json.Unmarshaler = &BaseProjectTaskPropertiesImpl{}

// UnmarshalJSON decodes the plain fields directly, then decodes each element of
// the "commands" array via UnmarshalCommandPropertiesImplementation so each
// command's concrete type is chosen from its own discriminator.
func (s *BaseProjectTaskPropertiesImpl) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		ClientData *map[string]string `json:"clientData,omitempty"`
		Errors     *[]ODataError      `json:"errors,omitempty"`
		State      *TaskState         `json:"state,omitempty"`
		TaskType   TaskType           `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling BaseProjectTaskPropertiesImpl into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'BaseProjectTaskPropertiesImpl': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}

// UnmarshalProjectTaskPropertiesImplementation unmarshals input into the
// concrete ProjectTaskProperties implementation selected by the "taskType"
// discriminator (matched case-insensitively). Note that more specific
// discriminator strings are checked before their prefixes (e.g.
// "ConnectToSource.SqlServer.Sync" before "ConnectToSource.SqlServer"), though
// EqualFold compares whole strings so the order is not load-bearing. Unknown
// values are preserved in a RawProjectTaskPropertiesImpl; nil input yields (nil, nil).
func UnmarshalProjectTaskPropertiesImplementation(input []byte) (ProjectTaskProperties, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling ProjectTaskProperties into map[string]interface: %+v", err)
	}

	var value string
	if v, ok := temp["taskType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "Connect.MongoDb") {
		var out ConnectToMongoDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToMongoDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.MySql") {
		var out ConnectToSourceMySqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceMySqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.Oracle.Sync") {
		var out ConnectToSourceOracleSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceOracleSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.PostgreSql.Sync") {
		var out ConnectToSourcePostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.SqlServer.Sync") {
		var out ConnectToSourceSqlServerSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.SqlServer") {
		var out ConnectToSourceSqlServerTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureDbForMySql") {
		var out ConnectToTargetAzureDbForMySqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureDbForPostgreSql.Sync") {
		var out ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync") {
		var out ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.SqlDb") {
		var out ConnectToTargetSqlDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI.Sync.LRS") {
		var out ConnectToTargetSqlMISyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMISyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI") {
		var out ConnectToTargetSqlMITaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMITaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.SqlDb.Sync") {
		var out ConnectToTargetSqlSqlDbSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetTDECertificates.Sql") {
		var out GetTdeCertificatesSqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetTdeCertificatesSqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTablesMySql") {
		var out GetUserTablesMySqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesMySqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTablesOracle") {
		var out GetUserTablesOracleTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesOracleTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTablesPostgreSql") {
		var out GetUserTablesPostgreSqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesPostgreSqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTables.AzureSqlDb.Sync") {
		var out GetUserTablesSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTables.Sql") {
		var out GetUserTablesSqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.MongoDb") {
		var out MigrateMongoDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMongoDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql") {
		var out MigrateMySqlAzureDbForMySqlOfflineTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql.Sync") {
		var out MigrateMySqlAzureDbForMySqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.Oracle.AzureDbForPostgreSql.Sync") {
		var out MigrateOracleAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2") {
		var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDb.Sync") {
		var out MigrateSqlServerSqlDbSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.SqlDb") {
		var out MigrateSqlServerSqlDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS") {
		var out MigrateSqlServerSqlMISyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI") {
		var out MigrateSqlServerSqlMITaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.Ssis") {
		var out MigrateSsisTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.SqlDb.Sync") {
		var out ValidateMigrationInputSqlServerSqlDbSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS") {
		var out ValidateMigrationInputSqlServerSqlMISyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI") {
		var out ValidateMigrationInputSqlServerSqlMITaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Validate.MongoDb") {
		var out ValidateMongoDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMongoDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Validate.Oracle.AzureDbPostgreSql.Sync") {
		var out ValidateOracleAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	// Fallback: keep the parsed base fields plus the raw key/value payload.
	var parent BaseProjectTaskPropertiesImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseProjectTaskPropertiesImpl: %+v", err)
	}

	return RawProjectTaskPropertiesImpl{
		projectTaskProperties: parent,
		Type:                  value,
		Values:                temp,
	}, nil

}

package patch

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// QueryAnalysisValidationResult pairs the execution comparison for a single
// query with any validation errors reported for it.
type QueryAnalysisValidationResult struct {
	// QueryResults holds the source-vs-target execution comparison for the query.
	QueryResults *QueryExecutionResult `json:"queryResults,omitempty"`
	// ValidationErrors is a single error object, not a slice — the wire schema
	// models it that way here.
	ValidationErrors *ValidationError `json:"validationErrors,omitempty"`
}

// QueryExecutionResult describes how one query batch executed on the source
// and on the target server, so the two can be compared.
type QueryExecutionResult struct {
	// QueryText is the text of the query that was executed.
	QueryText *string `json:"queryText,omitempty"`
	// SourceResult holds execution statistics captured on the source server.
	SourceResult *ExecutionStatistics `json:"sourceResult,omitempty"`
	// StatementsInBatch is the number of statements in the executed batch.
	StatementsInBatch *int64 `json:"statementsInBatch,omitempty"`
	// TargetResult holds execution statistics captured on the target server.
	TargetResult *ExecutionStatistics `json:"targetResult,omitempty"`
}
// ReportableException is the service's serialized form of an exception raised
// during a migration task, including where it occurred and its stack trace.
type ReportableException struct {
	// ActionableMessage is guidance the user can act on, when the service provides it.
	ActionableMessage *string `json:"actionableMessage,omitempty"`
	// FilePath is the source file in which the exception occurred.
	FilePath *string `json:"filePath,omitempty"`
	// HResult is the numeric error code accompanying the exception.
	HResult *int64 `json:"hResult,omitempty"`
	// LineNumber is serialized as a string by the service, not an integer.
	LineNumber *string `json:"lineNumber,omitempty"`
	Message    *string `json:"message,omitempty"`
	StackTrace *string `json:"stackTrace,omitempty"`
}

// SchemaComparisonValidationResult reports the outcome of comparing the
// source and target database schemas during migration validation.
type SchemaComparisonValidationResult struct {
	// SchemaDifferences describes a detected difference between the schemas.
	SchemaDifferences *SchemaComparisonValidationResultType `json:"schemaDifferences,omitempty"`
	// SourceDatabaseObjectCount maps object type name -> count on the source database.
	SourceDatabaseObjectCount *map[string]int64 `json:"sourceDatabaseObjectCount,omitempty"`
	// TargetDatabaseObjectCount maps object type name -> count on the target database.
	TargetDatabaseObjectCount *map[string]int64 `json:"targetDatabaseObjectCount,omitempty"`
	// ValidationErrors is a single error object per the wire schema.
	ValidationErrors *ValidationError `json:"validationErrors,omitempty"`
}
// SchemaComparisonValidationResultType identifies one schema object that
// differs between source and target, and the action needed to reconcile it.
type SchemaComparisonValidationResultType struct {
	// ObjectName is the name of the differing schema object.
	ObjectName *string `json:"objectName,omitempty"`
	// ObjectType classifies the object (table, view, etc. — see ObjectType enum).
	ObjectType *ObjectType `json:"objectType,omitempty"`
	// UpdateAction is the reconciliation action (see UpdateActionType enum).
	UpdateAction *UpdateActionType `json:"updateAction,omitempty"`
}

// SelectedCertificateInput identifies a certificate to migrate together with
// the password protecting its exported form. Both fields are required by the
// API, hence no omitempty and no pointer types.
type SelectedCertificateInput struct {
	CertificateName string `json:"certificateName"`
	// Password protects the exported certificate; treat as a secret.
	Password string `json:"password"`
}
// ServerProperties summarizes a database server discovered during assessment:
// edition, platform, version and how many databases it hosts.
type ServerProperties struct {
	// ServerDatabaseCount is the number of databases on the server.
	ServerDatabaseCount *int64 `json:"serverDatabaseCount,omitempty"`
	// ServerEdition is the product edition (free-form string from the service).
	ServerEdition *string  `json:"serverEdition,omitempty"`
	ServerName    *string  `json:"serverName,omitempty"`
	// ServerOperatingSystemVersion is the host OS version string.
	ServerOperatingSystemVersion *string `json:"serverOperatingSystemVersion,omitempty"`
	// ServerPlatform names the database platform (free-form string from the service).
	ServerPlatform *string `json:"serverPlatform,omitempty"`
	ServerVersion  *string `json:"serverVersion,omitempty"`
}

// ServiceSku is the SKU (pricing/capacity tier) of a Database Migration
// Service instance, mirroring the standard ARM sku shape.
type ServiceSku struct {
	// Capacity is the scale-out capacity of the SKU, when applicable.
	Capacity *int64 `json:"capacity,omitempty"`
	// Family groups related SKUs, when the service defines families.
	Family *string `json:"family,omitempty"`
	Name   *string `json:"name,omitempty"`
	Size   *string `json:"size,omitempty"`
	// Tier is the pricing tier name (e.g. as returned by the service).
	Tier *string `json:"tier,omitempty"`
}
// Compile-time check that SqlConnectionInfo satisfies the ConnectionInfo
// discriminated-union interface.
var _ ConnectionInfo = SqlConnectionInfo{}

// SqlConnectionInfo is the SQL Server variant of ConnectionInfo. On the wire
// it is distinguished by the discriminator `"type": "SqlConnectionInfo"`,
// which MarshalJSON injects automatically.
type SqlConnectionInfo struct {
	// AdditionalSettings carries extra connection-string settings verbatim.
	AdditionalSettings *string `json:"additionalSettings,omitempty"`
	// Authentication selects the authentication mode (see AuthenticationType enum).
	Authentication *AuthenticationType `json:"authentication,omitempty"`
	// DataSource is required: the server name or server.instance to connect to.
	DataSource        string  `json:"dataSource"`
	EncryptConnection *bool   `json:"encryptConnection,omitempty"`
	// Platform identifies the SQL source platform (see SqlSourcePlatform enum).
	Platform *SqlSourcePlatform `json:"platform,omitempty"`
	Port     *int64             `json:"port,omitempty"`
	// ResourceId is the ARM resource ID of the server, when it is an Azure resource.
	ResourceId             *string `json:"resourceId,omitempty"`
	ServerBrandVersion     *string `json:"serverBrandVersion,omitempty"`
	ServerName             *string `json:"serverName,omitempty"`
	ServerVersion          *string `json:"serverVersion,omitempty"`
	TrustServerCertificate *bool   `json:"trustServerCertificate,omitempty"`

	// Fields inherited from ConnectionInfo

	// Password is the credential secret; treat accordingly.
	Password *string `json:"password,omitempty"`
	// Type is the union discriminator; forced to "SqlConnectionInfo" on marshal.
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

// ConnectionInfo projects this variant down to the base-type view, copying
// only the fields shared by every ConnectionInfo implementation.
func (s SqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
	return BaseConnectionInfoImpl{
		Password: s.Password,
		Type:     s.Type,
		UserName: s.UserName,
	}
}

var _ json.Marshaler = SqlConnectionInfo{}

// MarshalJSON serializes the struct normally (via a wrapper type to avoid
// recursing into this method), then round-trips through a map so the
// discriminator "type" can be overwritten with the fixed variant tag.
func (s SqlConnectionInfo) MarshalJSON() ([]byte, error) {
	// wrapper has identical fields but no MarshalJSON, breaking the recursion.
	type wrapper SqlConnectionInfo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling SqlConnectionInfo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling SqlConnectionInfo: %+v", err)
	}

	// Force the discriminator regardless of what the caller set on Type.
	decoded["type"] = "SqlConnectionInfo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling SqlConnectionInfo: %+v", err)
	}

	return encoded, nil
}
// SqlServerSqlMISyncTaskInput is the input for the SQL Server -> Azure SQL
// Managed Instance online (sync) migration task. Fields without omitempty are
// required by the API.
type SqlServerSqlMISyncTaskInput struct {
	// AzureApp is the AAD application used to access the storage account.
	AzureApp AzureActiveDirectoryApp `json:"azureApp"`
	// BackupFileShare is the optional file share holding backups.
	BackupFileShare *FileShare `json:"backupFileShare,omitempty"`
	// SelectedDatabases lists the databases to migrate.
	SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"`
	// SourceConnectionInfo connects to the source SQL Server.
	SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"`
	// StorageResourceId is the ARM ID of the storage account staging the backups.
	StorageResourceId string `json:"storageResourceId"`
	// TargetConnectionInfo connects to the target Managed Instance.
	TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"`
}

// SsisMigrationInfo carries the options for migrating SSIS packages:
// where they are stored and how overwrite conflicts are resolved.
type SsisMigrationInfo struct {
	// EnvironmentOverwriteOption controls overwrite behavior for SSIS environments.
	EnvironmentOverwriteOption *SsisMigrationOverwriteOption `json:"environmentOverwriteOption,omitempty"`
	// ProjectOverwriteOption controls overwrite behavior for SSIS projects.
	ProjectOverwriteOption *SsisMigrationOverwriteOption `json:"projectOverwriteOption,omitempty"`
	// SsisStoreType identifies where the SSIS packages are stored.
	SsisStoreType *SsisStoreType `json:"ssisStoreType,omitempty"`
}
// StartMigrationScenarioServerRoleResult reports the migration state of a
// single server role, with any exceptions or warnings raised for it.
type StartMigrationScenarioServerRoleResult struct {
	// ExceptionsAndWarnings collects issues encountered while migrating the role.
	ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"`
	// Name is the server role's name.
	Name *string `json:"name,omitempty"`
	// State is the role's migration state (see MigrationState enum).
	State *MigrationState `json:"state,omitempty"`
}

// SyncMigrationDatabaseErrorEvent is one error event emitted during an online
// (sync) database migration. The timestamp is delivered as a preformatted
// string, not a time value.
type SyncMigrationDatabaseErrorEvent struct {
	// EventText is the error message body.
	EventText *string `json:"eventText,omitempty"`
	// EventTypeString classifies the event, as a service-provided string.
	EventTypeString *string `json:"eventTypeString,omitempty"`
	// TimestampString is the event time as a string; format is service-defined.
	TimestampString *string `json:"timestampString,omitempty"`
}
// Compile-time check that this type satisfies the ProjectTaskProperties
// discriminated-union interface.
var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{}

// ValidateMigrationInputSqlServerSqlDbSyncTaskProperties is the
// ProjectTaskProperties variant tagged on the wire with
// taskType "ValidateMigrationInput.SqlServer.SqlDb.Sync".
type ValidateMigrationInputSqlServerSqlDbSyncTaskProperties struct {
	// Input holds the task's validation input; Output collects its results.
	Input  *ValidateSyncMigrationInputSqlServerTaskInput   `json:"input,omitempty"`
	Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string `json:"clientData,omitempty"`
	// Commands is polymorphic: each element is decoded through
	// UnmarshalCommandPropertiesImplementation in UnmarshalJSON below.
	Commands *[]CommandProperties `json:"commands,omitempty"`
	Errors   *[]ODataError        `json:"errors,omitempty"`
	State    *TaskState           `json:"state,omitempty"`
	// TaskType is the union discriminator; forced on marshal.
	TaskType TaskType `json:"taskType"`
}

// ProjectTaskProperties projects this variant down to the base-type view.
func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{}

// MarshalJSON serializes via a method-less wrapper type (avoiding recursion),
// then round-trips through a map to force the taskType discriminator.
func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) {
	type wrapper ValidateMigrationInputSqlServerSqlDbSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err)
	}

	// Overwrite whatever the caller set so the wire discriminator is correct.
	decoded["taskType"] = "ValidateMigrationInput.SqlServer.SqlDb.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{}

// UnmarshalJSON decodes in two passes: first the plainly-typed fields into an
// anonymous struct (Commands deliberately omitted), then a raw-message pass
// that resolves each Commands element to its concrete CommandProperties
// implementation via the union helper.
func (s *ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	// Pass 1: everything except the polymorphic Commands slice.
	var decoded struct {
		Input      *ValidateSyncMigrationInputSqlServerTaskInput    `json:"input,omitempty"`
		Output     *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                               `json:"clientData,omitempty"`
		Errors     *[]ODataError                                    `json:"errors,omitempty"`
		State      *TaskState                                       `json:"state,omitempty"`
		TaskType   TaskType                                         `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Pass 2: re-read the payload as raw messages to handle "commands".
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			// Each element is resolved to its concrete variant by discriminator.
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlDbSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
// ValidateMigrationInputSqlServerSqlMISyncTaskOutput is one validation result
// for the SQL Server -> SQL MI online-migration input-validation task.
type ValidateMigrationInputSqlServerSqlMISyncTaskOutput struct {
	// Id is the result's identifier (generated code keeps "Id", not "ID").
	Id *string `json:"id,omitempty"`
	// Name is the name of the validated database.
	Name *string `json:"name,omitempty"`
	// ValidationErrors lists the errors found while validating the input.
	ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"`
}
// Compile-time check that this type satisfies the ProjectTaskProperties
// discriminated-union interface.
var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{}

// ValidateMigrationInputSqlServerSqlMISyncTaskProperties is the
// ProjectTaskProperties variant tagged on the wire with
// taskType "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS".
type ValidateMigrationInputSqlServerSqlMISyncTaskProperties struct {
	// Input holds the task's validation input; Output collects its results.
	Input  *SqlServerSqlMISyncTaskInput                          `json:"input,omitempty"`
	Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string `json:"clientData,omitempty"`
	// Commands is polymorphic; decoded element-by-element in UnmarshalJSON.
	Commands *[]CommandProperties `json:"commands,omitempty"`
	Errors   *[]ODataError        `json:"errors,omitempty"`
	State    *TaskState           `json:"state,omitempty"`
	// TaskType is the union discriminator; forced on marshal.
	TaskType TaskType `json:"taskType"`
}

// ProjectTaskProperties projects this variant down to the base-type view.
func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{}

// MarshalJSON serializes via a method-less wrapper type (avoiding recursion),
// then round-trips through a map to force the taskType discriminator.
func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) {
	type wrapper ValidateMigrationInputSqlServerSqlMISyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err)
	}

	// Overwrite whatever the caller set so the wire discriminator is correct.
	decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMISyncTaskProperties{}

// UnmarshalJSON decodes in two passes: plainly-typed fields first (Commands
// omitted), then a raw-message pass that resolves each Commands element to
// its concrete CommandProperties implementation.
func (s *ValidateMigrationInputSqlServerSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	// Pass 1: everything except the polymorphic Commands slice.
	var decoded struct {
		Input      *SqlServerSqlMISyncTaskInput                          `json:"input,omitempty"`
		Output     *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                                    `json:"clientData,omitempty"`
		Errors     *[]ODataError                                         `json:"errors,omitempty"`
		State      *TaskState                                            `json:"state,omitempty"`
		TaskType   TaskType                                              `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Pass 2: re-read the payload as raw messages to handle "commands".
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			// Each element is resolved to its concrete variant by discriminator.
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMISyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
// ValidateMigrationInputSqlServerSqlMITaskInput is the input for validating a
// SQL Server -> Azure SQL Managed Instance (offline) migration. Fields
// without omitempty are required by the API.
type ValidateMigrationInputSqlServerSqlMITaskInput struct {
	// BackupBlobShare is the blob container staging the backups (required).
	BackupBlobShare BlobShare `json:"backupBlobShare"`
	// BackupFileShare is an optional file share holding existing backups.
	BackupFileShare *FileShare `json:"backupFileShare,omitempty"`
	// BackupMode selects between service-created and existing backups.
	BackupMode *BackupMode `json:"backupMode,omitempty"`
	// SelectedDatabases lists the databases whose migration input is validated.
	SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"`
	// SelectedLogins optionally lists logins to migrate alongside the databases.
	SelectedLogins *[]string `json:"selectedLogins,omitempty"`
	// SourceConnectionInfo / TargetConnectionInfo are both plain SQL
	// connections here (the MI target is addressed as a SQL endpoint).
	SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"`
	TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"`
}
// ValidateMigrationInputSqlServerSqlMITaskOutput is one per-database result
// of the SQL MI migration-input validation task, with errors grouped by the
// aspect of the configuration that failed.
type ValidateMigrationInputSqlServerSqlMITaskOutput struct {
	// BackupFolderErrors: problems with the configured backup folder.
	BackupFolderErrors *[]ReportableException `json:"backupFolderErrors,omitempty"`
	// BackupShareCredentialsErrors: problems with credentials for the backup share.
	BackupShareCredentialsErrors *[]ReportableException `json:"backupShareCredentialsErrors,omitempty"`
	// BackupStorageAccountErrors: problems with the backup storage account.
	BackupStorageAccountErrors *[]ReportableException `json:"backupStorageAccountErrors,omitempty"`
	// DatabaseBackupInfo describes the backups found for the database.
	DatabaseBackupInfo *DatabaseBackupInfo `json:"databaseBackupInfo,omitempty"`
	// ExistingBackupErrors: problems detected in pre-existing backups.
	ExistingBackupErrors *[]ReportableException `json:"existingBackupErrors,omitempty"`
	// Id is the result identifier (generated code keeps "Id", not "ID").
	Id   *string `json:"id,omitempty"`
	Name *string `json:"name,omitempty"`
	// RestoreDatabaseNameErrors: problems with the requested restore names.
	RestoreDatabaseNameErrors *[]ReportableException `json:"restoreDatabaseNameErrors,omitempty"`
}
// Compile-time check that this type satisfies the ProjectTaskProperties
// discriminated-union interface.
var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMITaskProperties{}

// ValidateMigrationInputSqlServerSqlMITaskProperties is the
// ProjectTaskProperties variant tagged on the wire with
// taskType "ValidateMigrationInput.SqlServer.AzureSqlDbMI".
type ValidateMigrationInputSqlServerSqlMITaskProperties struct {
	// Input holds the task's validation input; Output collects its results.
	Input  *ValidateMigrationInputSqlServerSqlMITaskInput    `json:"input,omitempty"`
	Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string `json:"clientData,omitempty"`
	// Commands is polymorphic; decoded element-by-element in UnmarshalJSON.
	Commands *[]CommandProperties `json:"commands,omitempty"`
	Errors   *[]ODataError        `json:"errors,omitempty"`
	State    *TaskState           `json:"state,omitempty"`
	// TaskType is the union discriminator; forced on marshal.
	TaskType TaskType `json:"taskType"`
}

// ProjectTaskProperties projects this variant down to the base-type view.
func (s ValidateMigrationInputSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMITaskProperties{}

// MarshalJSON serializes via a method-less wrapper type (avoiding recursion),
// then round-trips through a map to force the taskType discriminator.
func (s ValidateMigrationInputSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) {
	type wrapper ValidateMigrationInputSqlServerSqlMITaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err)
	}

	// Overwrite whatever the caller set so the wire discriminator is correct.
	decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMITaskProperties{}

// UnmarshalJSON decodes in two passes: plainly-typed fields first (Commands
// omitted), then a raw-message pass that resolves each Commands element to
// its concrete CommandProperties implementation.
func (s *ValidateMigrationInputSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error {
	// Pass 1: everything except the polymorphic Commands slice.
	var decoded struct {
		Input      *ValidateMigrationInputSqlServerSqlMITaskInput    `json:"input,omitempty"`
		Output     *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                                `json:"clientData,omitempty"`
		Errors     *[]ODataError                                     `json:"errors,omitempty"`
		State      *TaskState                                        `json:"state,omitempty"`
		TaskType   TaskType                                          `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Pass 2: re-read the payload as raw messages to handle "commands".
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			// Each element is resolved to its concrete variant by discriminator.
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMITaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
// Compile-time check that this type satisfies the ProjectTaskProperties
// discriminated-union interface.
var _ ProjectTaskProperties = ValidateMongoDbTaskProperties{}

// ValidateMongoDbTaskProperties is the ProjectTaskProperties variant tagged
// on the wire with taskType "Validate.MongoDb".
type ValidateMongoDbTaskProperties struct {
	// Input is the MongoDB migration settings being validated; Output reports
	// the validation progress/results.
	Input  *MongoDbMigrationSettings   `json:"input,omitempty"`
	Output *[]MongoDbMigrationProgress `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string `json:"clientData,omitempty"`
	// Commands is polymorphic; decoded element-by-element in UnmarshalJSON.
	Commands *[]CommandProperties `json:"commands,omitempty"`
	Errors   *[]ODataError        `json:"errors,omitempty"`
	State    *TaskState           `json:"state,omitempty"`
	// TaskType is the union discriminator; forced on marshal.
	TaskType TaskType `json:"taskType"`
}

// ProjectTaskProperties projects this variant down to the base-type view.
func (s ValidateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ValidateMongoDbTaskProperties{}

// MarshalJSON serializes via a method-less wrapper type (avoiding recursion),
// then round-trips through a map to force the taskType discriminator.
func (s ValidateMongoDbTaskProperties) MarshalJSON() ([]byte, error) {
	type wrapper ValidateMongoDbTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ValidateMongoDbTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties: %+v", err)
	}

	// Overwrite whatever the caller set so the wire discriminator is correct.
	decoded["taskType"] = "Validate.MongoDb"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ValidateMongoDbTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ValidateMongoDbTaskProperties{}

// UnmarshalJSON decodes in two passes: plainly-typed fields first (Commands
// omitted), then a raw-message pass that resolves each Commands element to
// its concrete CommandProperties implementation.
func (s *ValidateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error {
	// Pass 1: everything except the polymorphic Commands slice.
	var decoded struct {
		Input      *MongoDbMigrationSettings   `json:"input,omitempty"`
		Output     *[]MongoDbMigrationProgress `json:"output,omitempty"`
		ClientData *map[string]string          `json:"clientData,omitempty"`
		Errors     *[]ODataError               `json:"errors,omitempty"`
		State      *TaskState                  `json:"state,omitempty"`
		TaskType   TaskType                    `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Pass 2: re-read the payload as raw messages to handle "commands".
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			// Each element is resolved to its concrete variant by discriminator.
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMongoDbTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
// Compile-time check that this type satisfies the ProjectTaskProperties
// discriminated-union interface.
var _ ProjectTaskProperties = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{}

// ValidateOracleAzureDbForPostgreSqlSyncTaskProperties is the
// ProjectTaskProperties variant tagged on the wire with
// taskType "Validate.Oracle.AzureDbPostgreSql.Sync". Note it reuses the
// Migrate task's input type for validation.
type ValidateOracleAzureDbForPostgreSqlSyncTaskProperties struct {
	// Input holds the (migrate-task-shaped) input; Output collects results.
	Input  *MigrateOracleAzureDbPostgreSqlSyncTaskInput     `json:"input,omitempty"`
	Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string `json:"clientData,omitempty"`
	// Commands is polymorphic; decoded element-by-element in UnmarshalJSON.
	Commands *[]CommandProperties `json:"commands,omitempty"`
	Errors   *[]ODataError        `json:"errors,omitempty"`
	State    *TaskState           `json:"state,omitempty"`
	// TaskType is the union discriminator; forced on marshal.
	TaskType TaskType `json:"taskType"`
}

// ProjectTaskProperties projects this variant down to the base-type view.
func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{}

// MarshalJSON serializes via a method-less wrapper type (avoiding recursion),
// then round-trips through a map to force the taskType discriminator.
func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) {
	type wrapper ValidateOracleAzureDbForPostgreSqlSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	// Overwrite whatever the caller set so the wire discriminator is correct.
	decoded["taskType"] = "Validate.Oracle.AzureDbPostgreSql.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{}

// UnmarshalJSON decodes in two passes: plainly-typed fields first (Commands
// omitted), then a raw-message pass that resolves each Commands element to
// its concrete CommandProperties implementation.
func (s *ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	// Pass 1: everything except the polymorphic Commands slice.
	var decoded struct {
		Input      *MigrateOracleAzureDbPostgreSqlSyncTaskInput     `json:"input,omitempty"`
		Output     *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                               `json:"clientData,omitempty"`
		Errors     *[]ODataError                                    `json:"errors,omitempty"`
		State      *TaskState                                       `json:"state,omitempty"`
		TaskType   TaskType                                         `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Pass 2: re-read the payload as raw messages to handle "commands".
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			// Each element is resolved to its concrete variant by discriminator.
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateOracleAzureDbPostgreSqlSyncTaskOutput struct { + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_validatesyncmigrationinputsqlservertaskinput.go b/resource-manager/datamigration/2025-06-30/patch/model_validatesyncmigrationinputsqlservertaskinput.go new file mode 100644 index 00000000000..e6f56c812c2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_validatesyncmigrationinputsqlservertaskinput.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateSyncMigrationInputSqlServerTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_validatesyncmigrationinputsqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/patch/model_validatesyncmigrationinputsqlservertaskoutput.go new file mode 100644 index 00000000000..430440d2afe --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_validatesyncmigrationinputsqlservertaskoutput.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateSyncMigrationInputSqlServerTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_validationerror.go b/resource-manager/datamigration/2025-06-30/patch/model_validationerror.go new file mode 100644 index 00000000000..a60d21b10b6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_validationerror.go @@ -0,0 +1,9 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidationError struct { + Severity *Severity `json:"severity,omitempty"` + Text *string `json:"text,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/model_waitstatistics.go b/resource-manager/datamigration/2025-06-30/patch/model_waitstatistics.go new file mode 100644 index 00000000000..7e91f123f6d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/model_waitstatistics.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type WaitStatistics struct { + WaitCount *int64 `json:"waitCount,omitempty"` + WaitTimeMs *float64 `json:"waitTimeMs,omitempty"` + WaitType *string `json:"waitType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/patch/version.go b/resource-manager/datamigration/2025-06-30/patch/version.go new file mode 100644 index 00000000000..43fff77948b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/patch/version.go @@ -0,0 +1,10 @@ +package patch + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/patch/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/post/README.md b/resource-manager/datamigration/2025-06-30/post/README.md new file mode 100644 index 00000000000..9cf24848327 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/README.md @@ -0,0 +1,187 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/post` Documentation + +The `post` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/post" +``` + + +### Client Initialization + +```go +client := post.NewPOSTClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `POSTClient.FilesRead` + +```go +ctx := context.TODO() +id := post.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +read, err := client.FilesRead(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `POSTClient.FilesReadWrite` + +```go +ctx := context.TODO() +id := post.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +read, err := client.FilesReadWrite(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `POSTClient.ServiceTasksCancel` + +```go +ctx := context.TODO() +id := 
post.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +read, err := client.ServiceTasksCancel(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `POSTClient.ServicesCheckChildrenNameAvailability` + +```go +ctx := context.TODO() +id := post.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +payload := post.NameAvailabilityRequest{ + // ... +} + + +read, err := client.ServicesCheckChildrenNameAvailability(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `POSTClient.ServicesCheckNameAvailability` + +```go +ctx := context.TODO() +id := post.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +payload := post.NameAvailabilityRequest{ + // ... 
+} + + +read, err := client.ServicesCheckNameAvailability(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `POSTClient.ServicesCheckStatus` + +```go +ctx := context.TODO() +id := post.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +read, err := client.ServicesCheckStatus(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `POSTClient.ServicesStart` + +```go +ctx := context.TODO() +id := post.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +if err := client.ServicesStartThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `POSTClient.ServicesStop` + +```go +ctx := context.TODO() +id := post.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +if err := client.ServicesStopThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `POSTClient.TasksCancel` + +```go +ctx := context.TODO() +id := post.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +read, err := client.TasksCancel(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `POSTClient.TasksCommand` + +```go +ctx := context.TODO() +id := post.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +payload := post.CommandProperties{ + // ... 
+} + + +read, err := client.TasksCommand(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/datamigration/2025-06-30/post/client.go b/resource-manager/datamigration/2025-06-30/post/client.go new file mode 100644 index 00000000000..5a71c067caa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/client.go @@ -0,0 +1,26 @@ +package post + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type POSTClient struct { + Client *resourcemanager.Client +} + +func NewPOSTClientWithBaseURI(sdkApi sdkEnv.Api) (*POSTClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "post", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating POSTClient: %+v", err) + } + + return &POSTClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/constants.go b/resource-manager/datamigration/2025-06-30/post/constants.go new file mode 100644 index 00000000000..522f8b5db05 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/constants.go @@ -0,0 +1,2146 @@ +package post + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AuthenticationType string + +const ( + AuthenticationTypeActiveDirectoryIntegrated AuthenticationType = "ActiveDirectoryIntegrated" + AuthenticationTypeActiveDirectoryPassword AuthenticationType = "ActiveDirectoryPassword" + AuthenticationTypeNone AuthenticationType = "None" + AuthenticationTypeSqlAuthentication AuthenticationType = "SqlAuthentication" + AuthenticationTypeWindowsAuthentication AuthenticationType = "WindowsAuthentication" +) + +func PossibleValuesForAuthenticationType() []string { + return []string{ + string(AuthenticationTypeActiveDirectoryIntegrated), + string(AuthenticationTypeActiveDirectoryPassword), + string(AuthenticationTypeNone), + string(AuthenticationTypeSqlAuthentication), + string(AuthenticationTypeWindowsAuthentication), + } +} + +func (s *AuthenticationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthenticationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthenticationType(input string) (*AuthenticationType, error) { + vals := map[string]AuthenticationType{ + "activedirectoryintegrated": AuthenticationTypeActiveDirectoryIntegrated, + "activedirectorypassword": AuthenticationTypeActiveDirectoryPassword, + "none": AuthenticationTypeNone, + "sqlauthentication": AuthenticationTypeSqlAuthentication, + "windowsauthentication": AuthenticationTypeWindowsAuthentication, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthenticationType(input) + return &out, nil +} + +type BackupFileStatus string + +const ( + BackupFileStatusArrived BackupFileStatus = "Arrived" + BackupFileStatusCancelled BackupFileStatus = "Cancelled" + BackupFileStatusQueued BackupFileStatus = "Queued" + BackupFileStatusRestored 
BackupFileStatus = "Restored" + BackupFileStatusRestoring BackupFileStatus = "Restoring" + BackupFileStatusUploaded BackupFileStatus = "Uploaded" + BackupFileStatusUploading BackupFileStatus = "Uploading" +) + +func PossibleValuesForBackupFileStatus() []string { + return []string{ + string(BackupFileStatusArrived), + string(BackupFileStatusCancelled), + string(BackupFileStatusQueued), + string(BackupFileStatusRestored), + string(BackupFileStatusRestoring), + string(BackupFileStatusUploaded), + string(BackupFileStatusUploading), + } +} + +func (s *BackupFileStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupFileStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupFileStatus(input string) (*BackupFileStatus, error) { + vals := map[string]BackupFileStatus{ + "arrived": BackupFileStatusArrived, + "cancelled": BackupFileStatusCancelled, + "queued": BackupFileStatusQueued, + "restored": BackupFileStatusRestored, + "restoring": BackupFileStatusRestoring, + "uploaded": BackupFileStatusUploaded, + "uploading": BackupFileStatusUploading, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupFileStatus(input) + return &out, nil +} + +type BackupMode string + +const ( + BackupModeCreateBackup BackupMode = "CreateBackup" + BackupModeExistingBackup BackupMode = "ExistingBackup" +) + +func PossibleValuesForBackupMode() []string { + return []string{ + string(BackupModeCreateBackup), + string(BackupModeExistingBackup), + } +} + +func (s *BackupMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupMode(decoded) + 
if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupMode(input string) (*BackupMode, error) { + vals := map[string]BackupMode{ + "createbackup": BackupModeCreateBackup, + "existingbackup": BackupModeExistingBackup, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupMode(input) + return &out, nil +} + +type BackupType string + +const ( + BackupTypeDatabase BackupType = "Database" + BackupTypeDifferentialDatabase BackupType = "DifferentialDatabase" + BackupTypeDifferentialFile BackupType = "DifferentialFile" + BackupTypeDifferentialPartial BackupType = "DifferentialPartial" + BackupTypeFile BackupType = "File" + BackupTypePartial BackupType = "Partial" + BackupTypeTransactionLog BackupType = "TransactionLog" +) + +func PossibleValuesForBackupType() []string { + return []string{ + string(BackupTypeDatabase), + string(BackupTypeDifferentialDatabase), + string(BackupTypeDifferentialFile), + string(BackupTypeDifferentialPartial), + string(BackupTypeFile), + string(BackupTypePartial), + string(BackupTypeTransactionLog), + } +} + +func (s *BackupType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupType(input string) (*BackupType, error) { + vals := map[string]BackupType{ + "database": BackupTypeDatabase, + "differentialdatabase": BackupTypeDifferentialDatabase, + "differentialfile": BackupTypeDifferentialFile, + "differentialpartial": BackupTypeDifferentialPartial, + "file": BackupTypeFile, + "partial": BackupTypePartial, + "transactionlog": BackupTypeTransactionLog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return 
&v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupType(input) + return &out, nil +} + +type CommandState string + +const ( + CommandStateAccepted CommandState = "Accepted" + CommandStateFailed CommandState = "Failed" + CommandStateRunning CommandState = "Running" + CommandStateSucceeded CommandState = "Succeeded" + CommandStateUnknown CommandState = "Unknown" +) + +func PossibleValuesForCommandState() []string { + return []string{ + string(CommandStateAccepted), + string(CommandStateFailed), + string(CommandStateRunning), + string(CommandStateSucceeded), + string(CommandStateUnknown), + } +} + +func (s *CommandState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCommandState(input string) (*CommandState, error) { + vals := map[string]CommandState{ + "accepted": CommandStateAccepted, + "failed": CommandStateFailed, + "running": CommandStateRunning, + "succeeded": CommandStateSucceeded, + "unknown": CommandStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CommandState(input) + return &out, nil +} + +type CommandType string + +const ( + CommandTypeCancel CommandType = "cancel" + CommandTypeFinish CommandType = "finish" + CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete CommandType = "Migrate.SqlServer.AzureDbSqlMi.Complete" + CommandTypeMigratePointSyncPointCompletePointDatabase CommandType = "Migrate.Sync.Complete.Database" + CommandTypeRestart CommandType = "restart" +) + +func PossibleValuesForCommandType() []string { + return []string{ + string(CommandTypeCancel), + string(CommandTypeFinish), + 
string(CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete), + string(CommandTypeMigratePointSyncPointCompletePointDatabase), + string(CommandTypeRestart), + } +} + +func (s *CommandType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCommandType(input string) (*CommandType, error) { + vals := map[string]CommandType{ + "cancel": CommandTypeCancel, + "finish": CommandTypeFinish, + "migrate.sqlserver.azuredbsqlmi.complete": CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete, + "migrate.sync.complete.database": CommandTypeMigratePointSyncPointCompletePointDatabase, + "restart": CommandTypeRestart, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CommandType(input) + return &out, nil +} + +type DatabaseCompatLevel string + +const ( + DatabaseCompatLevelCompatLevelEightZero DatabaseCompatLevel = "CompatLevel80" + DatabaseCompatLevelCompatLevelNineZero DatabaseCompatLevel = "CompatLevel90" + DatabaseCompatLevelCompatLevelOneFourZero DatabaseCompatLevel = "CompatLevel140" + DatabaseCompatLevelCompatLevelOneHundred DatabaseCompatLevel = "CompatLevel100" + DatabaseCompatLevelCompatLevelOneOneZero DatabaseCompatLevel = "CompatLevel110" + DatabaseCompatLevelCompatLevelOneThreeZero DatabaseCompatLevel = "CompatLevel130" + DatabaseCompatLevelCompatLevelOneTwoZero DatabaseCompatLevel = "CompatLevel120" +) + +func PossibleValuesForDatabaseCompatLevel() []string { + return []string{ + string(DatabaseCompatLevelCompatLevelEightZero), + string(DatabaseCompatLevelCompatLevelNineZero), + string(DatabaseCompatLevelCompatLevelOneFourZero), + string(DatabaseCompatLevelCompatLevelOneHundred), + 
string(DatabaseCompatLevelCompatLevelOneOneZero), + string(DatabaseCompatLevelCompatLevelOneThreeZero), + string(DatabaseCompatLevelCompatLevelOneTwoZero), + } +} + +func (s *DatabaseCompatLevel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseCompatLevel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseCompatLevel(input string) (*DatabaseCompatLevel, error) { + vals := map[string]DatabaseCompatLevel{ + "compatlevel80": DatabaseCompatLevelCompatLevelEightZero, + "compatlevel90": DatabaseCompatLevelCompatLevelNineZero, + "compatlevel140": DatabaseCompatLevelCompatLevelOneFourZero, + "compatlevel100": DatabaseCompatLevelCompatLevelOneHundred, + "compatlevel110": DatabaseCompatLevelCompatLevelOneOneZero, + "compatlevel130": DatabaseCompatLevelCompatLevelOneThreeZero, + "compatlevel120": DatabaseCompatLevelCompatLevelOneTwoZero, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseCompatLevel(input) + return &out, nil +} + +type DatabaseFileType string + +const ( + DatabaseFileTypeFilestream DatabaseFileType = "Filestream" + DatabaseFileTypeFulltext DatabaseFileType = "Fulltext" + DatabaseFileTypeLog DatabaseFileType = "Log" + DatabaseFileTypeNotSupported DatabaseFileType = "NotSupported" + DatabaseFileTypeRows DatabaseFileType = "Rows" +) + +func PossibleValuesForDatabaseFileType() []string { + return []string{ + string(DatabaseFileTypeFilestream), + string(DatabaseFileTypeFulltext), + string(DatabaseFileTypeLog), + string(DatabaseFileTypeNotSupported), + string(DatabaseFileTypeRows), + } +} + +func (s *DatabaseFileType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + 
return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseFileType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseFileType(input string) (*DatabaseFileType, error) { + vals := map[string]DatabaseFileType{ + "filestream": DatabaseFileTypeFilestream, + "fulltext": DatabaseFileTypeFulltext, + "log": DatabaseFileTypeLog, + "notsupported": DatabaseFileTypeNotSupported, + "rows": DatabaseFileTypeRows, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseFileType(input) + return &out, nil +} + +type DatabaseMigrationStage string + +const ( + DatabaseMigrationStageBackup DatabaseMigrationStage = "Backup" + DatabaseMigrationStageCompleted DatabaseMigrationStage = "Completed" + DatabaseMigrationStageFileCopy DatabaseMigrationStage = "FileCopy" + DatabaseMigrationStageInitialize DatabaseMigrationStage = "Initialize" + DatabaseMigrationStageNone DatabaseMigrationStage = "None" + DatabaseMigrationStageRestore DatabaseMigrationStage = "Restore" +) + +func PossibleValuesForDatabaseMigrationStage() []string { + return []string{ + string(DatabaseMigrationStageBackup), + string(DatabaseMigrationStageCompleted), + string(DatabaseMigrationStageFileCopy), + string(DatabaseMigrationStageInitialize), + string(DatabaseMigrationStageNone), + string(DatabaseMigrationStageRestore), + } +} + +func (s *DatabaseMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationStage(input string) (*DatabaseMigrationStage, error) { + vals := map[string]DatabaseMigrationStage{ + "backup": 
DatabaseMigrationStageBackup, + "completed": DatabaseMigrationStageCompleted, + "filecopy": DatabaseMigrationStageFileCopy, + "initialize": DatabaseMigrationStageInitialize, + "none": DatabaseMigrationStageNone, + "restore": DatabaseMigrationStageRestore, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseMigrationStage(input) + return &out, nil +} + +type DatabaseMigrationState string + +const ( + DatabaseMigrationStateCANCELLED DatabaseMigrationState = "CANCELLED" + DatabaseMigrationStateCOMPLETED DatabaseMigrationState = "COMPLETED" + DatabaseMigrationStateCUTOVERSTART DatabaseMigrationState = "CUTOVER_START" + DatabaseMigrationStateFAILED DatabaseMigrationState = "FAILED" + DatabaseMigrationStateFULLBACKUPUPLOADSTART DatabaseMigrationState = "FULL_BACKUP_UPLOAD_START" + DatabaseMigrationStateINITIAL DatabaseMigrationState = "INITIAL" + DatabaseMigrationStateLOGSHIPPINGSTART DatabaseMigrationState = "LOG_SHIPPING_START" + DatabaseMigrationStatePOSTCUTOVERCOMPLETE DatabaseMigrationState = "POST_CUTOVER_COMPLETE" + DatabaseMigrationStateUNDEFINED DatabaseMigrationState = "UNDEFINED" + DatabaseMigrationStateUPLOADLOGFILESSTART DatabaseMigrationState = "UPLOAD_LOG_FILES_START" +) + +func PossibleValuesForDatabaseMigrationState() []string { + return []string{ + string(DatabaseMigrationStateCANCELLED), + string(DatabaseMigrationStateCOMPLETED), + string(DatabaseMigrationStateCUTOVERSTART), + string(DatabaseMigrationStateFAILED), + string(DatabaseMigrationStateFULLBACKUPUPLOADSTART), + string(DatabaseMigrationStateINITIAL), + string(DatabaseMigrationStateLOGSHIPPINGSTART), + string(DatabaseMigrationStatePOSTCUTOVERCOMPLETE), + string(DatabaseMigrationStateUNDEFINED), + string(DatabaseMigrationStateUPLOADLOGFILESSTART), + } +} + +func (s *DatabaseMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); 
err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseDatabaseMigrationState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseDatabaseMigrationState maps input (compared case-insensitively) onto a
// known DatabaseMigrationState; unrecognised values are passed through as-is
// rather than rejected, so newer API values don't break older SDK versions.
func parseDatabaseMigrationState(input string) (*DatabaseMigrationState, error) {
	vals := map[string]DatabaseMigrationState{
		"cancelled":                DatabaseMigrationStateCANCELLED,
		"completed":                DatabaseMigrationStateCOMPLETED,
		"cutover_start":            DatabaseMigrationStateCUTOVERSTART,
		"failed":                   DatabaseMigrationStateFAILED,
		"full_backup_upload_start": DatabaseMigrationStateFULLBACKUPUPLOADSTART,
		"initial":                  DatabaseMigrationStateINITIAL,
		"log_shipping_start":       DatabaseMigrationStateLOGSHIPPINGSTART,
		"post_cutover_complete":    DatabaseMigrationStatePOSTCUTOVERCOMPLETE,
		"undefined":                DatabaseMigrationStateUNDEFINED,
		"upload_log_files_start":   DatabaseMigrationStateUPLOADLOGFILESSTART,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := DatabaseMigrationState(input)
	return &out, nil
}

// DatabaseState is an enum-style string type describing the state of a database.
type DatabaseState string

const (
	DatabaseStateCopying          DatabaseState = "Copying"
	DatabaseStateEmergency        DatabaseState = "Emergency"
	DatabaseStateOffline          DatabaseState = "Offline"
	DatabaseStateOfflineSecondary DatabaseState = "OfflineSecondary"
	DatabaseStateOnline           DatabaseState = "Online"
	DatabaseStateRecovering       DatabaseState = "Recovering"
	DatabaseStateRecoveryPending  DatabaseState = "RecoveryPending"
	DatabaseStateRestoring        DatabaseState = "Restoring"
	DatabaseStateSuspect          DatabaseState = "Suspect"
)

// PossibleValuesForDatabaseState returns every known DatabaseState value as a string.
func PossibleValuesForDatabaseState() []string {
	return []string{
		string(DatabaseStateCopying),
		string(DatabaseStateEmergency),
		string(DatabaseStateOffline),
		string(DatabaseStateOfflineSecondary),
		string(DatabaseStateOnline),
		string(DatabaseStateRecovering),
		string(DatabaseStateRecoveryPending),
		string(DatabaseStateRestoring),
		string(DatabaseStateSuspect),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseDatabaseState.
func (s *DatabaseState) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseDatabaseState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseDatabaseState maps input (case-insensitively) onto a known
// DatabaseState; unknown values are passed through best-effort.
func parseDatabaseState(input string) (*DatabaseState, error) {
	vals := map[string]DatabaseState{
		"copying":          DatabaseStateCopying,
		"emergency":        DatabaseStateEmergency,
		"offline":          DatabaseStateOffline,
		"offlinesecondary": DatabaseStateOfflineSecondary,
		"online":           DatabaseStateOnline,
		"recovering":       DatabaseStateRecovering,
		"recoverypending":  DatabaseStateRecoveryPending,
		"restoring":        DatabaseStateRestoring,
		"suspect":          DatabaseStateSuspect,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := DatabaseState(input)
	return &out, nil
}

// LoginMigrationStage is an enum-style string type describing a login migration stage.
type LoginMigrationStage string

const (
	LoginMigrationStageAssignRoleMembership       LoginMigrationStage = "AssignRoleMembership"
	LoginMigrationStageAssignRoleOwnership        LoginMigrationStage = "AssignRoleOwnership"
	LoginMigrationStageCompleted                  LoginMigrationStage = "Completed"
	LoginMigrationStageEstablishObjectPermissions LoginMigrationStage = "EstablishObjectPermissions"
	LoginMigrationStageEstablishServerPermissions LoginMigrationStage = "EstablishServerPermissions"
	LoginMigrationStageEstablishUserMapping       LoginMigrationStage = "EstablishUserMapping"
	LoginMigrationStageInitialize                 LoginMigrationStage = "Initialize"
	LoginMigrationStageLoginMigration             LoginMigrationStage = "LoginMigration"
	LoginMigrationStageNone                       LoginMigrationStage = "None"
)

// PossibleValuesForLoginMigrationStage returns every known LoginMigrationStage value as a string.
func PossibleValuesForLoginMigrationStage() []string {
	return []string{
		string(LoginMigrationStageAssignRoleMembership),
		string(LoginMigrationStageAssignRoleOwnership),
		string(LoginMigrationStageCompleted),
		string(LoginMigrationStageEstablishObjectPermissions),
		string(LoginMigrationStageEstablishServerPermissions),
		string(LoginMigrationStageEstablishUserMapping),
		string(LoginMigrationStageInitialize),
		string(LoginMigrationStageLoginMigration),
		string(LoginMigrationStageNone),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseLoginMigrationStage.
func (s *LoginMigrationStage) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseLoginMigrationStage(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseLoginMigrationStage maps input (case-insensitively) onto a known
// LoginMigrationStage; unknown values are passed through best-effort.
func parseLoginMigrationStage(input string) (*LoginMigrationStage, error) {
	vals := map[string]LoginMigrationStage{
		"assignrolemembership":       LoginMigrationStageAssignRoleMembership,
		"assignroleownership":        LoginMigrationStageAssignRoleOwnership,
		"completed":                  LoginMigrationStageCompleted,
		"establishobjectpermissions": LoginMigrationStageEstablishObjectPermissions,
		"establishserverpermissions": LoginMigrationStageEstablishServerPermissions,
		"establishusermapping":       LoginMigrationStageEstablishUserMapping,
		"initialize":                 LoginMigrationStageInitialize,
		"loginmigration":             LoginMigrationStageLoginMigration,
		"none":                       LoginMigrationStageNone,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := LoginMigrationStage(input)
	return &out, nil
}

// LoginType is an enum-style string type describing the type of a login.
type LoginType string

const (
	LoginTypeAsymmetricKey LoginType = "AsymmetricKey"
	LoginTypeCertificate   LoginType = "Certificate"
	LoginTypeExternalGroup LoginType = "ExternalGroup"
	LoginTypeExternalUser  LoginType = "ExternalUser"
	LoginTypeSqlLogin      LoginType = "SqlLogin"
	LoginTypeWindowsGroup  LoginType = "WindowsGroup"
	LoginTypeWindowsUser   LoginType = "WindowsUser"
)

// PossibleValuesForLoginType returns every known LoginType value as a string.
func PossibleValuesForLoginType() []string {
	return []string{
		string(LoginTypeAsymmetricKey),
		string(LoginTypeCertificate),
		string(LoginTypeExternalGroup),
		string(LoginTypeExternalUser),
		string(LoginTypeSqlLogin),
		string(LoginTypeWindowsGroup),
		string(LoginTypeWindowsUser),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseLoginType.
func (s *LoginType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseLoginType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseLoginType maps input (case-insensitively) onto a known LoginType;
// unknown values are passed through best-effort.
func parseLoginType(input string) (*LoginType, error) {
	vals := map[string]LoginType{
		"asymmetrickey": LoginTypeAsymmetricKey,
		"certificate":   LoginTypeCertificate,
		"externalgroup": LoginTypeExternalGroup,
		"externaluser":  LoginTypeExternalUser,
		"sqllogin":      LoginTypeSqlLogin,
		"windowsgroup":  LoginTypeWindowsGroup,
		"windowsuser":   LoginTypeWindowsUser,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := LoginType(input)
	return &out, nil
}

// MigrationState is an enum-style string type describing the state of a migration.
type MigrationState string

const (
	MigrationStateCompleted  MigrationState = "Completed"
	MigrationStateFailed     MigrationState = "Failed"
	MigrationStateInProgress MigrationState = "InProgress"
	MigrationStateNone       MigrationState = "None"
	MigrationStateSkipped    MigrationState = "Skipped"
	MigrationStateStopped    MigrationState = "Stopped"
	MigrationStateWarning    MigrationState = "Warning"
)

// PossibleValuesForMigrationState returns every known MigrationState value as a string.
func PossibleValuesForMigrationState() []string {
	return []string{
		string(MigrationStateCompleted),
		string(MigrationStateFailed),
		string(MigrationStateInProgress),
		string(MigrationStateNone),
		string(MigrationStateSkipped),
		string(MigrationStateStopped),
		string(MigrationStateWarning),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseMigrationState.
func (s *MigrationState) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMigrationState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMigrationState maps input (case-insensitively) onto a known
// MigrationState; unknown values are passed through best-effort.
func parseMigrationState(input string) (*MigrationState, error) {
	vals := map[string]MigrationState{
		"completed":  MigrationStateCompleted,
		"failed":     MigrationStateFailed,
		"inprogress": MigrationStateInProgress,
		"none":       MigrationStateNone,
		"skipped":    MigrationStateSkipped,
		"stopped":    MigrationStateStopped,
		"warning":    MigrationStateWarning,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MigrationState(input)
	return &out, nil
}

// MigrationStatus is an enum-style string type describing the status of a migration.
type MigrationStatus string

const (
	MigrationStatusCompleted               MigrationStatus = "Completed"
	MigrationStatusCompletedWithWarnings   MigrationStatus = "CompletedWithWarnings"
	MigrationStatusConfigured              MigrationStatus = "Configured"
	MigrationStatusConnecting              MigrationStatus = "Connecting"
	MigrationStatusDefault                 MigrationStatus = "Default"
	MigrationStatusError                   MigrationStatus = "Error"
	MigrationStatusRunning                 MigrationStatus = "Running"
	MigrationStatusSelectLogins            MigrationStatus = "SelectLogins"
	MigrationStatusSourceAndTargetSelected MigrationStatus = "SourceAndTargetSelected"
	MigrationStatusStopped                 MigrationStatus = "Stopped"
)

// PossibleValuesForMigrationStatus returns every known MigrationStatus value as a string.
func PossibleValuesForMigrationStatus() []string {
	return []string{
		string(MigrationStatusCompleted),
		string(MigrationStatusCompletedWithWarnings),
		string(MigrationStatusConfigured),
		string(MigrationStatusConnecting),
		string(MigrationStatusDefault),
		string(MigrationStatusError),
		string(MigrationStatusRunning),
		string(MigrationStatusSelectLogins),
		string(MigrationStatusSourceAndTargetSelected),
		string(MigrationStatusStopped),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseMigrationStatus.
func (s *MigrationStatus) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMigrationStatus(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMigrationStatus maps input (case-insensitively) onto a known
// MigrationStatus; unknown values are passed through best-effort.
func parseMigrationStatus(input string) (*MigrationStatus, error) {
	vals := map[string]MigrationStatus{
		"completed":               MigrationStatusCompleted,
		"completedwithwarnings":   MigrationStatusCompletedWithWarnings,
		"configured":              MigrationStatusConfigured,
		"connecting":              MigrationStatusConnecting,
		"default":                 MigrationStatusDefault,
		"error":                   MigrationStatusError,
		"running":                 MigrationStatusRunning,
		"selectlogins":            MigrationStatusSelectLogins,
		"sourceandtargetselected": MigrationStatusSourceAndTargetSelected,
		"stopped":                 MigrationStatusStopped,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MigrationStatus(input)
	return &out, nil
}

// MongoDbClusterType is an enum-style string type describing the type of a MongoDB cluster.
type MongoDbClusterType string

const (
	MongoDbClusterTypeBlobContainer MongoDbClusterType = "BlobContainer"
	MongoDbClusterTypeCosmosDb      MongoDbClusterType = "CosmosDb"
	MongoDbClusterTypeMongoDb       MongoDbClusterType = "MongoDb"
)

// PossibleValuesForMongoDbClusterType returns every known MongoDbClusterType value as a string.
func PossibleValuesForMongoDbClusterType() []string {
	return []string{
		string(MongoDbClusterTypeBlobContainer),
		string(MongoDbClusterTypeCosmosDb),
		string(MongoDbClusterTypeMongoDb),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseMongoDbClusterType.
func (s *MongoDbClusterType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMongoDbClusterType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMongoDbClusterType maps input (case-insensitively) onto a known
// MongoDbClusterType; unknown values are passed through best-effort.
func parseMongoDbClusterType(input string) (*MongoDbClusterType, error) {
	vals := map[string]MongoDbClusterType{
		"blobcontainer": MongoDbClusterTypeBlobContainer,
		"cosmosdb":      MongoDbClusterTypeCosmosDb,
		"mongodb":       MongoDbClusterTypeMongoDb,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MongoDbClusterType(input)
	return &out, nil
}

// MongoDbErrorType is an enum-style string type classifying a MongoDB migration error.
type MongoDbErrorType string

const (
	MongoDbErrorTypeError           MongoDbErrorType = "Error"
	MongoDbErrorTypeValidationError MongoDbErrorType = "ValidationError"
	MongoDbErrorTypeWarning         MongoDbErrorType = "Warning"
)

// PossibleValuesForMongoDbErrorType returns every known MongoDbErrorType value as a string.
func PossibleValuesForMongoDbErrorType() []string {
	return []string{
		string(MongoDbErrorTypeError),
		string(MongoDbErrorTypeValidationError),
		string(MongoDbErrorTypeWarning),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseMongoDbErrorType.
func (s *MongoDbErrorType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMongoDbErrorType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMongoDbErrorType maps input (case-insensitively) onto a known
// MongoDbErrorType; unknown values are passed through best-effort.
func parseMongoDbErrorType(input string) (*MongoDbErrorType, error) {
	vals := map[string]MongoDbErrorType{
		"error":           MongoDbErrorTypeError,
		"validationerror": MongoDbErrorTypeValidationError,
		"warning":         MongoDbErrorTypeWarning,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MongoDbErrorType(input)
	return &out, nil
}

// MongoDbMigrationState is an enum-style string type describing the state of a MongoDB migration.
type MongoDbMigrationState string

const (
	MongoDbMigrationStateCanceled        MongoDbMigrationState = "Canceled"
	MongoDbMigrationStateComplete        MongoDbMigrationState = "Complete"
	MongoDbMigrationStateCopying         MongoDbMigrationState = "Copying"
	MongoDbMigrationStateFailed          MongoDbMigrationState = "Failed"
	MongoDbMigrationStateFinalizing      MongoDbMigrationState = "Finalizing"
	MongoDbMigrationStateInitialReplay   MongoDbMigrationState = "InitialReplay"
	MongoDbMigrationStateInitializing    MongoDbMigrationState = "Initializing"
	MongoDbMigrationStateNotStarted      MongoDbMigrationState = "NotStarted"
	MongoDbMigrationStateReplaying       MongoDbMigrationState = "Replaying"
	MongoDbMigrationStateRestarting      MongoDbMigrationState = "Restarting"
	MongoDbMigrationStateValidatingInput MongoDbMigrationState = "ValidatingInput"
)

// PossibleValuesForMongoDbMigrationState returns every known MongoDbMigrationState value as a string.
func PossibleValuesForMongoDbMigrationState() []string {
	return []string{
		string(MongoDbMigrationStateCanceled),
		string(MongoDbMigrationStateComplete),
		string(MongoDbMigrationStateCopying),
		string(MongoDbMigrationStateFailed),
		string(MongoDbMigrationStateFinalizing),
		string(MongoDbMigrationStateInitialReplay),
		string(MongoDbMigrationStateInitializing),
		string(MongoDbMigrationStateNotStarted),
		string(MongoDbMigrationStateReplaying),
		string(MongoDbMigrationStateRestarting),
		string(MongoDbMigrationStateValidatingInput),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseMongoDbMigrationState.
func (s *MongoDbMigrationState) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMongoDbMigrationState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMongoDbMigrationState maps input (case-insensitively) onto a known
// MongoDbMigrationState; unknown values are passed through best-effort.
func parseMongoDbMigrationState(input string) (*MongoDbMigrationState, error) {
	vals := map[string]MongoDbMigrationState{
		"canceled":        MongoDbMigrationStateCanceled,
		"complete":        MongoDbMigrationStateComplete,
		"copying":         MongoDbMigrationStateCopying,
		"failed":          MongoDbMigrationStateFailed,
		"finalizing":      MongoDbMigrationStateFinalizing,
		"initialreplay":   MongoDbMigrationStateInitialReplay,
		"initializing":    MongoDbMigrationStateInitializing,
		"notstarted":      MongoDbMigrationStateNotStarted,
		"replaying":       MongoDbMigrationStateReplaying,
		"restarting":      MongoDbMigrationStateRestarting,
		"validatinginput": MongoDbMigrationStateValidatingInput,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MongoDbMigrationState(input)
	return &out, nil
}
// MongoDbReplication is an enum-style string type describing the replication mode for a MongoDB migration.
type MongoDbReplication string

const (
	MongoDbReplicationContinuous MongoDbReplication = "Continuous"
	MongoDbReplicationDisabled   MongoDbReplication = "Disabled"
	MongoDbReplicationOneTime    MongoDbReplication = "OneTime"
)

// PossibleValuesForMongoDbReplication returns every known MongoDbReplication value as a string.
func PossibleValuesForMongoDbReplication() []string {
	return []string{
		string(MongoDbReplicationContinuous),
		string(MongoDbReplicationDisabled),
		string(MongoDbReplicationOneTime),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseMongoDbReplication.
func (s *MongoDbReplication) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMongoDbReplication(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMongoDbReplication maps input (case-insensitively) onto a known
// MongoDbReplication; unknown values are passed through best-effort.
func parseMongoDbReplication(input string) (*MongoDbReplication, error) {
	vals := map[string]MongoDbReplication{
		"continuous": MongoDbReplicationContinuous,
		"disabled":   MongoDbReplicationDisabled,
		"onetime":    MongoDbReplicationOneTime,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MongoDbReplication(input)
	return &out, nil
}

// MongoDbShardKeyOrder is an enum-style string type describing the ordering of a MongoDB shard key field.
type MongoDbShardKeyOrder string

const (
	MongoDbShardKeyOrderForward MongoDbShardKeyOrder = "Forward"
	MongoDbShardKeyOrderHashed  MongoDbShardKeyOrder = "Hashed"
	MongoDbShardKeyOrderReverse MongoDbShardKeyOrder = "Reverse"
)

// PossibleValuesForMongoDbShardKeyOrder returns every known MongoDbShardKeyOrder value as a string.
func PossibleValuesForMongoDbShardKeyOrder() []string {
	return []string{
		string(MongoDbShardKeyOrderForward),
		string(MongoDbShardKeyOrderHashed),
		string(MongoDbShardKeyOrderReverse),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseMongoDbShardKeyOrder.
func (s *MongoDbShardKeyOrder) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMongoDbShardKeyOrder(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMongoDbShardKeyOrder maps input (case-insensitively) onto a known
// MongoDbShardKeyOrder; unknown values are passed through best-effort.
func parseMongoDbShardKeyOrder(input string) (*MongoDbShardKeyOrder, error) {
	vals := map[string]MongoDbShardKeyOrder{
		"forward": MongoDbShardKeyOrderForward,
		"hashed":  MongoDbShardKeyOrderHashed,
		"reverse": MongoDbShardKeyOrderReverse,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MongoDbShardKeyOrder(input)
	return &out, nil
}

// MySqlTargetPlatformType is an enum-style string type describing the target platform for a MySQL migration.
type MySqlTargetPlatformType string

const (
	MySqlTargetPlatformTypeAzureDbForMySQL MySqlTargetPlatformType = "AzureDbForMySQL"
	MySqlTargetPlatformTypeSqlServer       MySqlTargetPlatformType = "SqlServer"
)

// PossibleValuesForMySqlTargetPlatformType returns every known MySqlTargetPlatformType value as a string.
func PossibleValuesForMySqlTargetPlatformType() []string {
	return []string{
		string(MySqlTargetPlatformTypeAzureDbForMySQL),
		string(MySqlTargetPlatformTypeSqlServer),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseMySqlTargetPlatformType.
func (s *MySqlTargetPlatformType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMySqlTargetPlatformType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMySqlTargetPlatformType maps input (case-insensitively) onto a known
// MySqlTargetPlatformType; unknown values are passed through best-effort.
func parseMySqlTargetPlatformType(input string) (*MySqlTargetPlatformType, error) {
	vals := map[string]MySqlTargetPlatformType{
		"azuredbformysql": MySqlTargetPlatformTypeAzureDbForMySQL,
		"sqlserver":       MySqlTargetPlatformTypeSqlServer,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MySqlTargetPlatformType(input)
	return &out, nil
}

// NameCheckFailureReason is an enum-style string type giving the reason a name check failed.
type NameCheckFailureReason string

const (
	NameCheckFailureReasonAlreadyExists NameCheckFailureReason = "AlreadyExists"
	NameCheckFailureReasonInvalid       NameCheckFailureReason = "Invalid"
)

// PossibleValuesForNameCheckFailureReason returns every known NameCheckFailureReason value as a string.
func PossibleValuesForNameCheckFailureReason() []string {
	return []string{
		string(NameCheckFailureReasonAlreadyExists),
		string(NameCheckFailureReasonInvalid),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseNameCheckFailureReason.
func (s *NameCheckFailureReason) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseNameCheckFailureReason(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseNameCheckFailureReason maps input (case-insensitively) onto a known
// NameCheckFailureReason; unknown values are passed through best-effort.
func parseNameCheckFailureReason(input string) (*NameCheckFailureReason, error) {
	vals := map[string]NameCheckFailureReason{
		"alreadyexists": NameCheckFailureReasonAlreadyExists,
		"invalid":       NameCheckFailureReasonInvalid,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := NameCheckFailureReason(input)
	return &out, nil
}

// ObjectType is an enum-style string type describing the kind of database object.
type ObjectType string

const (
	ObjectTypeFunction         ObjectType = "Function"
	ObjectTypeStoredProcedures ObjectType = "StoredProcedures"
	ObjectTypeTable            ObjectType = "Table"
	ObjectTypeUser             ObjectType = "User"
	ObjectTypeView             ObjectType = "View"
)

// PossibleValuesForObjectType returns every known ObjectType value as a string.
func PossibleValuesForObjectType() []string {
	return []string{
		string(ObjectTypeFunction),
		string(ObjectTypeStoredProcedures),
		string(ObjectTypeTable),
		string(ObjectTypeUser),
		string(ObjectTypeView),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseObjectType.
func (s *ObjectType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseObjectType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseObjectType maps input (case-insensitively) onto a known ObjectType;
// unknown values are passed through best-effort.
func parseObjectType(input string) (*ObjectType, error) {
	vals := map[string]ObjectType{
		"function":         ObjectTypeFunction,
		"storedprocedures": ObjectTypeStoredProcedures,
		"table":            ObjectTypeTable,
		"user":             ObjectTypeUser,
		"view":             ObjectTypeView,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise
	// presume it's an undefined value and best-effort it
	out := ObjectType(input)
	return &out, nil
}

// ReplicateMigrationState is an enum-style string type describing the state of a replicate migration.
type ReplicateMigrationState string

const (
	ReplicateMigrationStateACTIONREQUIRED ReplicateMigrationState = "ACTION_REQUIRED"
	ReplicateMigrationStateCOMPLETE       ReplicateMigrationState = "COMPLETE"
	ReplicateMigrationStateFAILED         ReplicateMigrationState = "FAILED"
	ReplicateMigrationStatePENDING        ReplicateMigrationState = "PENDING"
	ReplicateMigrationStateUNDEFINED      ReplicateMigrationState = "UNDEFINED"
	ReplicateMigrationStateVALIDATING     ReplicateMigrationState = "VALIDATING"
)

// PossibleValuesForReplicateMigrationState returns every known ReplicateMigrationState value as a string.
func PossibleValuesForReplicateMigrationState() []string {
	return []string{
		string(ReplicateMigrationStateACTIONREQUIRED),
		string(ReplicateMigrationStateCOMPLETE),
		string(ReplicateMigrationStateFAILED),
		string(ReplicateMigrationStatePENDING),
		string(ReplicateMigrationStateUNDEFINED),
		string(ReplicateMigrationStateVALIDATING),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseReplicateMigrationState.
func (s *ReplicateMigrationState) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseReplicateMigrationState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseReplicateMigrationState maps input (case-insensitively) onto a known
// ReplicateMigrationState; unknown values are passed through best-effort.
func parseReplicateMigrationState(input string) (*ReplicateMigrationState, error) {
	vals := map[string]ReplicateMigrationState{
		"action_required": ReplicateMigrationStateACTIONREQUIRED,
		"complete":        ReplicateMigrationStateCOMPLETE,
		"failed":          ReplicateMigrationStateFAILED,
		"pending":         ReplicateMigrationStatePENDING,
		"undefined":       ReplicateMigrationStateUNDEFINED,
		"validating":      ReplicateMigrationStateVALIDATING,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := ReplicateMigrationState(input)
	return &out, nil
}

// ResultType is an enum-style string type describing the kind of result item.
type ResultType string

const (
	ResultTypeCollection ResultType = "Collection"
	ResultTypeDatabase   ResultType = "Database"
	ResultTypeMigration  ResultType = "Migration"
)

// PossibleValuesForResultType returns every known ResultType value as a string.
func PossibleValuesForResultType() []string {
	return []string{
		string(ResultTypeCollection),
		string(ResultTypeDatabase),
		string(ResultTypeMigration),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseResultType.
func (s *ResultType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseResultType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseResultType maps input (case-insensitively) onto a known ResultType;
// unknown values are passed through best-effort.
func parseResultType(input string) (*ResultType, error) {
	vals := map[string]ResultType{
		"collection": ResultTypeCollection,
		"database":   ResultTypeDatabase,
		"migration":  ResultTypeMigration,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := ResultType(input)
	return &out, nil
}

// ScenarioSource is an enum-style string type describing the source platform of a migration scenario.
type ScenarioSource string

const (
	ScenarioSourceAccess        ScenarioSource = "Access"
	ScenarioSourceDBTwo         ScenarioSource = "DB2"
	ScenarioSourceMongoDB       ScenarioSource = "MongoDB"
	ScenarioSourceMySQL         ScenarioSource = "MySQL"
	ScenarioSourceMySQLRDS      ScenarioSource = "MySQLRDS"
	ScenarioSourceOracle        ScenarioSource = "Oracle"
	ScenarioSourcePostgreSQL    ScenarioSource = "PostgreSQL"
	ScenarioSourcePostgreSQLRDS ScenarioSource = "PostgreSQLRDS"
	ScenarioSourceSQL           ScenarioSource = "SQL"
	ScenarioSourceSQLRDS        ScenarioSource = "SQLRDS"
	ScenarioSourceSybase        ScenarioSource = "Sybase"
)

// PossibleValuesForScenarioSource returns every known ScenarioSource value as a string.
func PossibleValuesForScenarioSource() []string {
	return []string{
		string(ScenarioSourceAccess),
		string(ScenarioSourceDBTwo),
		string(ScenarioSourceMongoDB),
		string(ScenarioSourceMySQL),
		string(ScenarioSourceMySQLRDS),
		string(ScenarioSourceOracle),
		string(ScenarioSourcePostgreSQL),
		string(ScenarioSourcePostgreSQLRDS),
		string(ScenarioSourceSQL),
		string(ScenarioSourceSQLRDS),
		string(ScenarioSourceSybase),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseScenarioSource.
func (s *ScenarioSource) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseScenarioSource(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseScenarioSource maps input (case-insensitively) onto a known
// ScenarioSource; unknown values are passed through best-effort.
func parseScenarioSource(input string) (*ScenarioSource, error) {
	vals := map[string]ScenarioSource{
		"access":        ScenarioSourceAccess,
		"db2":           ScenarioSourceDBTwo,
		"mongodb":       ScenarioSourceMongoDB,
		"mysql":         ScenarioSourceMySQL,
		"mysqlrds":      ScenarioSourceMySQLRDS,
		"oracle":        ScenarioSourceOracle,
		"postgresql":    ScenarioSourcePostgreSQL,
		"postgresqlrds": ScenarioSourcePostgreSQLRDS,
		"sql":           ScenarioSourceSQL,
		"sqlrds":        ScenarioSourceSQLRDS,
		"sybase":        ScenarioSourceSybase,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := ScenarioSource(input)
	return &out, nil
}

// ScenarioTarget is an enum-style string type describing the target platform of a migration scenario.
type ScenarioTarget string

const (
	ScenarioTargetAzureDBForMySql       ScenarioTarget = "AzureDBForMySql"
	ScenarioTargetAzureDBForPostgresSQL ScenarioTarget = "AzureDBForPostgresSQL"
	ScenarioTargetMongoDB               ScenarioTarget = "MongoDB"
	ScenarioTargetSQLDB                 ScenarioTarget = "SQLDB"
	ScenarioTargetSQLDW                 ScenarioTarget = "SQLDW"
	ScenarioTargetSQLMI                 ScenarioTarget = "SQLMI"
	ScenarioTargetSQLServer             ScenarioTarget = "SQLServer"
)

// PossibleValuesForScenarioTarget returns every known ScenarioTarget value as a string.
func PossibleValuesForScenarioTarget() []string {
	return []string{
		string(ScenarioTargetAzureDBForMySql),
		string(ScenarioTargetAzureDBForPostgresSQL),
		string(ScenarioTargetMongoDB),
		string(ScenarioTargetSQLDB),
		string(ScenarioTargetSQLDW),
		string(ScenarioTargetSQLMI),
		string(ScenarioTargetSQLServer),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseScenarioTarget.
func (s *ScenarioTarget) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err
!= nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseScenarioTarget(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseScenarioTarget maps input (case-insensitively) onto a known
// ScenarioTarget; unknown values are passed through best-effort.
func parseScenarioTarget(input string) (*ScenarioTarget, error) {
	vals := map[string]ScenarioTarget{
		"azuredbformysql":       ScenarioTargetAzureDBForMySql,
		"azuredbforpostgressql": ScenarioTargetAzureDBForPostgresSQL,
		"mongodb":               ScenarioTargetMongoDB,
		"sqldb":                 ScenarioTargetSQLDB,
		"sqldw":                 ScenarioTargetSQLDW,
		"sqlmi":                 ScenarioTargetSQLMI,
		"sqlserver":             ScenarioTargetSQLServer,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := ScenarioTarget(input)
	return &out, nil
}

// ServerLevelPermissionsGroup is an enum-style string type identifying the
// permission group required for a migration scenario.
type ServerLevelPermissionsGroup string

const (
	ServerLevelPermissionsGroupDefault                             ServerLevelPermissionsGroup = "Default"
	ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL ServerLevelPermissionsGroup = "MigrationFromMySQLToAzureDBForMySQL"
	ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB     ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureDB"
	ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI     ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureMI"
	ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM     ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureVM"
)

// PossibleValuesForServerLevelPermissionsGroup returns every known ServerLevelPermissionsGroup value as a string.
func PossibleValuesForServerLevelPermissionsGroup() []string {
	return []string{
		string(ServerLevelPermissionsGroupDefault),
		string(ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL),
		string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB),
		string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI),
		string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseServerLevelPermissionsGroup.
func (s *ServerLevelPermissionsGroup) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseServerLevelPermissionsGroup(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseServerLevelPermissionsGroup maps input (case-insensitively) onto a known
// ServerLevelPermissionsGroup; unknown values are passed through best-effort.
func parseServerLevelPermissionsGroup(input string) (*ServerLevelPermissionsGroup, error) {
	vals := map[string]ServerLevelPermissionsGroup{
		"default":                             ServerLevelPermissionsGroupDefault,
		"migrationfrommysqltoazuredbformysql": ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL,
		"migrationfromsqlservertoazuredb":     ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB,
		"migrationfromsqlservertoazuremi":     ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI,
		"migrationfromsqlservertoazurevm":     ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := ServerLevelPermissionsGroup(input)
	return &out, nil
}

// Severity is an enum-style string type classifying the severity of a reported item.
type Severity string

const (
	SeverityError   Severity = "Error"
	SeverityMessage Severity = "Message"
	SeverityWarning Severity = "Warning"
)

// PossibleValuesForSeverity returns every known Severity value as a string.
func PossibleValuesForSeverity() []string {
	return []string{
		string(SeverityError),
		string(SeverityMessage),
		string(SeverityWarning),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseSeverity.
func (s *Severity) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseSeverity(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseSeverity maps input (case-insensitively) onto a known Severity;
// unknown values are passed through best-effort.
func parseSeverity(input string) (*Severity, error) {
	vals := map[string]Severity{
		"error":   SeverityError,
		"message": SeverityMessage,
		"warning": SeverityWarning,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := Severity(input)
	return &out, nil
}

// SqlSourcePlatform is an enum-style string type describing the source SQL platform.
type SqlSourcePlatform string

const (
	SqlSourcePlatformSqlOnPrem SqlSourcePlatform = "SqlOnPrem"
)

// PossibleValuesForSqlSourcePlatform returns every known SqlSourcePlatform value as a string.
func PossibleValuesForSqlSourcePlatform() []string {
	return []string{
		string(SqlSourcePlatformSqlOnPrem),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseSqlSourcePlatform.
func (s *SqlSourcePlatform) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseSqlSourcePlatform(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseSqlSourcePlatform maps input (case-insensitively) onto a known
// SqlSourcePlatform; unknown values are passed through best-effort.
func parseSqlSourcePlatform(input string) (*SqlSourcePlatform, error) {
	vals := map[string]SqlSourcePlatform{
		"sqlonprem": SqlSourcePlatformSqlOnPrem,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := SqlSourcePlatform(input)
	return &out, nil
}

// SsisMigrationOverwriteOption is an enum-style string type describing how
// existing SSIS items are handled during migration.
type SsisMigrationOverwriteOption string

const (
	SsisMigrationOverwriteOptionIgnore    SsisMigrationOverwriteOption = "Ignore"
	SsisMigrationOverwriteOptionOverwrite SsisMigrationOverwriteOption = "Overwrite"
)

// PossibleValuesForSsisMigrationOverwriteOption returns every known SsisMigrationOverwriteOption value as a string.
func PossibleValuesForSsisMigrationOverwriteOption() []string {
	return []string{
		string(SsisMigrationOverwriteOptionIgnore),
		string(SsisMigrationOverwriteOptionOverwrite),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseSsisMigrationOverwriteOption.
func (s *SsisMigrationOverwriteOption) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseSsisMigrationOverwriteOption(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseSsisMigrationOverwriteOption maps input (case-insensitively) onto a known
// SsisMigrationOverwriteOption; unknown values are passed through best-effort.
func parseSsisMigrationOverwriteOption(input string) (*SsisMigrationOverwriteOption, error) {
	vals := map[string]SsisMigrationOverwriteOption{
		"ignore":    SsisMigrationOverwriteOptionIgnore,
		"overwrite": SsisMigrationOverwriteOptionOverwrite,
	}
	if
 v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := SsisMigrationOverwriteOption(input)
	return &out, nil
}

// SsisMigrationStage is an enum-style string type describing the stage of an SSIS migration.
type SsisMigrationStage string

const (
	SsisMigrationStageCompleted  SsisMigrationStage = "Completed"
	SsisMigrationStageInProgress SsisMigrationStage = "InProgress"
	SsisMigrationStageInitialize SsisMigrationStage = "Initialize"
	SsisMigrationStageNone       SsisMigrationStage = "None"
)

// PossibleValuesForSsisMigrationStage returns every known SsisMigrationStage value as a string.
func PossibleValuesForSsisMigrationStage() []string {
	return []string{
		string(SsisMigrationStageCompleted),
		string(SsisMigrationStageInProgress),
		string(SsisMigrationStageInitialize),
		string(SsisMigrationStageNone),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseSsisMigrationStage.
func (s *SsisMigrationStage) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseSsisMigrationStage(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseSsisMigrationStage maps input (case-insensitively) onto a known
// SsisMigrationStage; unknown values are passed through best-effort.
func parseSsisMigrationStage(input string) (*SsisMigrationStage, error) {
	vals := map[string]SsisMigrationStage{
		"completed":  SsisMigrationStageCompleted,
		"inprogress": SsisMigrationStageInProgress,
		"initialize": SsisMigrationStageInitialize,
		"none":       SsisMigrationStageNone,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := SsisMigrationStage(input)
	return &out, nil
}

// SsisStoreType is an enum-style string type describing the kind of SSIS store.
type SsisStoreType string

const (
	SsisStoreTypeSsisCatalog SsisStoreType = "SsisCatalog"
)

// PossibleValuesForSsisStoreType returns every known SsisStoreType value as a string.
func PossibleValuesForSsisStoreType() []string {
	return []string{
		string(SsisStoreTypeSsisCatalog),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseSsisStoreType.
func (s *SsisStoreType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseSsisStoreType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseSsisStoreType maps input (case-insensitively) onto a known
// SsisStoreType; unknown values are passed through best-effort.
func parseSsisStoreType(input string) (*SsisStoreType, error) {
	vals := map[string]SsisStoreType{
		"ssiscatalog": SsisStoreTypeSsisCatalog,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := SsisStoreType(input)
	return &out, nil
}

// SyncDatabaseMigrationReportingState is an enum-style string type describing
// the reported state of a sync database migration.
// NOTE(review): "INITIALIAZING" is the literal wire value defined by the
// service API (typo included) — do not "correct" it here.
type SyncDatabaseMigrationReportingState string

const (
	SyncDatabaseMigrationReportingStateBACKUPCOMPLETED    SyncDatabaseMigrationReportingState = "BACKUP_COMPLETED"
	SyncDatabaseMigrationReportingStateBACKUPINPROGRESS   SyncDatabaseMigrationReportingState = "BACKUP_IN_PROGRESS"
	SyncDatabaseMigrationReportingStateCANCELLED          SyncDatabaseMigrationReportingState = "CANCELLED"
	SyncDatabaseMigrationReportingStateCANCELLING         SyncDatabaseMigrationReportingState = "CANCELLING"
	SyncDatabaseMigrationReportingStateCOMPLETE           SyncDatabaseMigrationReportingState = "COMPLETE"
	SyncDatabaseMigrationReportingStateCOMPLETING         SyncDatabaseMigrationReportingState = "COMPLETING"
	SyncDatabaseMigrationReportingStateCONFIGURING        SyncDatabaseMigrationReportingState = "CONFIGURING"
	SyncDatabaseMigrationReportingStateFAILED             SyncDatabaseMigrationReportingState = "FAILED"
	SyncDatabaseMigrationReportingStateINITIALIAZING      SyncDatabaseMigrationReportingState = "INITIALIAZING"
	SyncDatabaseMigrationReportingStateREADYTOCOMPLETE    SyncDatabaseMigrationReportingState = "READY_TO_COMPLETE"
	SyncDatabaseMigrationReportingStateRESTORECOMPLETED   SyncDatabaseMigrationReportingState = "RESTORE_COMPLETED"
	SyncDatabaseMigrationReportingStateRESTOREINPROGRESS  SyncDatabaseMigrationReportingState = "RESTORE_IN_PROGRESS"
	SyncDatabaseMigrationReportingStateRUNNING            SyncDatabaseMigrationReportingState = "RUNNING"
	SyncDatabaseMigrationReportingStateSTARTING           SyncDatabaseMigrationReportingState = "STARTING"
	SyncDatabaseMigrationReportingStateUNDEFINED          SyncDatabaseMigrationReportingState = "UNDEFINED"
	SyncDatabaseMigrationReportingStateVALIDATING         SyncDatabaseMigrationReportingState = "VALIDATING"
	SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE SyncDatabaseMigrationReportingState = "VALIDATION_COMPLETE"
	SyncDatabaseMigrationReportingStateVALIDATIONFAILED   SyncDatabaseMigrationReportingState = "VALIDATION_FAILED"
)

// PossibleValuesForSyncDatabaseMigrationReportingState returns every known SyncDatabaseMigrationReportingState value as a string.
func PossibleValuesForSyncDatabaseMigrationReportingState() []string {
	return []string{
		string(SyncDatabaseMigrationReportingStateBACKUPCOMPLETED),
		string(SyncDatabaseMigrationReportingStateBACKUPINPROGRESS),
		string(SyncDatabaseMigrationReportingStateCANCELLED),
		string(SyncDatabaseMigrationReportingStateCANCELLING),
		string(SyncDatabaseMigrationReportingStateCOMPLETE),
		string(SyncDatabaseMigrationReportingStateCOMPLETING),
		string(SyncDatabaseMigrationReportingStateCONFIGURING),
		string(SyncDatabaseMigrationReportingStateFAILED),
		string(SyncDatabaseMigrationReportingStateINITIALIAZING),
		string(SyncDatabaseMigrationReportingStateREADYTOCOMPLETE),
		string(SyncDatabaseMigrationReportingStateRESTORECOMPLETED),
		string(SyncDatabaseMigrationReportingStateRESTOREINPROGRESS),
		string(SyncDatabaseMigrationReportingStateRUNNING),
		string(SyncDatabaseMigrationReportingStateSTARTING),
		string(SyncDatabaseMigrationReportingStateUNDEFINED),
		string(SyncDatabaseMigrationReportingStateVALIDATING),
		string(SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE),
		string(SyncDatabaseMigrationReportingStateVALIDATIONFAILED),
	}
}

// UnmarshalJSON implements json.Unmarshaler, normalising the wire value
// case-insensitively via parseSyncDatabaseMigrationReportingState.
func (s *SyncDatabaseMigrationReportingState) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseSyncDatabaseMigrationReportingState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseSyncDatabaseMigrationReportingState maps input (case-insensitively) onto
// a known SyncDatabaseMigrationReportingState; unknown values are passed
// through best-effort.
func parseSyncDatabaseMigrationReportingState(input string) (*SyncDatabaseMigrationReportingState, error) {
	vals := map[string]SyncDatabaseMigrationReportingState{
		"backup_completed":    SyncDatabaseMigrationReportingStateBACKUPCOMPLETED,
		"backup_in_progress":  SyncDatabaseMigrationReportingStateBACKUPINPROGRESS,
		"cancelled":           SyncDatabaseMigrationReportingStateCANCELLED,
		"cancelling":          SyncDatabaseMigrationReportingStateCANCELLING,
		"complete":            SyncDatabaseMigrationReportingStateCOMPLETE,
		"completing":          SyncDatabaseMigrationReportingStateCOMPLETING,
		"configuring":         SyncDatabaseMigrationReportingStateCONFIGURING,
		"failed":              SyncDatabaseMigrationReportingStateFAILED,
		"initialiazing":       SyncDatabaseMigrationReportingStateINITIALIAZING,
		"ready_to_complete":   SyncDatabaseMigrationReportingStateREADYTOCOMPLETE,
		"restore_completed":   SyncDatabaseMigrationReportingStateRESTORECOMPLETED,
		"restore_in_progress": SyncDatabaseMigrationReportingStateRESTOREINPROGRESS,
		"running":             SyncDatabaseMigrationReportingStateRUNNING,
		"starting":            SyncDatabaseMigrationReportingStateSTARTING,
		"undefined":           SyncDatabaseMigrationReportingStateUNDEFINED,
		"validating":          SyncDatabaseMigrationReportingStateVALIDATING,
		"validation_complete": SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE,
		"validation_failed":   SyncDatabaseMigrationReportingStateVALIDATIONFAILED,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := SyncDatabaseMigrationReportingState(input)
	return &out, nil
}

// SyncTableMigrationState is an enum-style string type describing the state of
// a table in a sync migration. (The const block continues beyond this chunk.)
type SyncTableMigrationState string

const (
	SyncTableMigrationStateBEFORELOAD SyncTableMigrationState = "BEFORE_LOAD"
	SyncTableMigrationStateCANCELED   SyncTableMigrationState = "CANCELED"
	SyncTableMigrationStateCOMPLETED  SyncTableMigrationState = "COMPLETED"
	SyncTableMigrationStateERROR      SyncTableMigrationState = "ERROR"
	SyncTableMigrationStateFAILED     SyncTableMigrationState = "FAILED"
SyncTableMigrationStateFULLLOAD SyncTableMigrationState = "FULL_LOAD" +) + +func PossibleValuesForSyncTableMigrationState() []string { + return []string{ + string(SyncTableMigrationStateBEFORELOAD), + string(SyncTableMigrationStateCANCELED), + string(SyncTableMigrationStateCOMPLETED), + string(SyncTableMigrationStateERROR), + string(SyncTableMigrationStateFAILED), + string(SyncTableMigrationStateFULLLOAD), + } +} + +func (s *SyncTableMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSyncTableMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSyncTableMigrationState(input string) (*SyncTableMigrationState, error) { + vals := map[string]SyncTableMigrationState{ + "before_load": SyncTableMigrationStateBEFORELOAD, + "canceled": SyncTableMigrationStateCANCELED, + "completed": SyncTableMigrationStateCOMPLETED, + "error": SyncTableMigrationStateERROR, + "failed": SyncTableMigrationStateFAILED, + "full_load": SyncTableMigrationStateFULLLOAD, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SyncTableMigrationState(input) + return &out, nil +} + +type TaskState string + +const ( + TaskStateCanceled TaskState = "Canceled" + TaskStateFailed TaskState = "Failed" + TaskStateFailedInputValidation TaskState = "FailedInputValidation" + TaskStateFaulted TaskState = "Faulted" + TaskStateQueued TaskState = "Queued" + TaskStateRunning TaskState = "Running" + TaskStateSucceeded TaskState = "Succeeded" + TaskStateUnknown TaskState = "Unknown" +) + +func PossibleValuesForTaskState() []string { + return []string{ + string(TaskStateCanceled), + string(TaskStateFailed), + string(TaskStateFailedInputValidation), + string(TaskStateFaulted), + 
string(TaskStateQueued), + string(TaskStateRunning), + string(TaskStateSucceeded), + string(TaskStateUnknown), + } +} + +func (s *TaskState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTaskState(input string) (*TaskState, error) { + vals := map[string]TaskState{ + "canceled": TaskStateCanceled, + "failed": TaskStateFailed, + "failedinputvalidation": TaskStateFailedInputValidation, + "faulted": TaskStateFaulted, + "queued": TaskStateQueued, + "running": TaskStateRunning, + "succeeded": TaskStateSucceeded, + "unknown": TaskStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskState(input) + return &out, nil +} + +type TaskType string + +const ( + TaskTypeConnectPointMongoDb TaskType = "Connect.MongoDb" + TaskTypeConnectToSourcePointMySql TaskType = "ConnectToSource.MySql" + TaskTypeConnectToSourcePointOraclePointSync TaskType = "ConnectToSource.Oracle.Sync" + TaskTypeConnectToSourcePointPostgreSqlPointSync TaskType = "ConnectToSource.PostgreSql.Sync" + TaskTypeConnectToSourcePointSqlServer TaskType = "ConnectToSource.SqlServer" + TaskTypeConnectToSourcePointSqlServerPointSync TaskType = "ConnectToSource.SqlServer.Sync" + TaskTypeConnectToTargetPointAzureDbForMySql TaskType = "ConnectToTarget.AzureDbForMySql" + TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointAzureSqlDbMI TaskType = "ConnectToTarget.AzureSqlDbMI" + TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS TaskType = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync 
TaskType = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointSqlDb TaskType = "ConnectToTarget.SqlDb" + TaskTypeConnectToTargetPointSqlDbPointSync TaskType = "ConnectToTarget.SqlDb.Sync" + TaskTypeGetTDECertificatesPointSql TaskType = "GetTDECertificates.Sql" + TaskTypeGetUserTablesMySql TaskType = "GetUserTablesMySql" + TaskTypeGetUserTablesOracle TaskType = "GetUserTablesOracle" + TaskTypeGetUserTablesPointAzureSqlDbPointSync TaskType = "GetUserTables.AzureSqlDb.Sync" + TaskTypeGetUserTablesPointSql TaskType = "GetUserTables.Sql" + TaskTypeGetUserTablesPostgreSql TaskType = "GetUserTablesPostgreSql" + TaskTypeMigratePointMongoDb TaskType = "Migrate.MongoDb" + TaskTypeMigratePointMySqlPointAzureDbForMySql TaskType = "Migrate.MySql.AzureDbForMySql" + TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync TaskType = "Migrate.MySql.AzureDbForMySql.Sync" + TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync TaskType = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo TaskType = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + TaskTypeMigratePointSqlServerPointAzureSqlDbMI TaskType = "Migrate.SqlServer.AzureSqlDbMI" + TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync TaskType = "Migrate.SqlServer.AzureSqlDb.Sync" + TaskTypeMigratePointSqlServerPointSqlDb TaskType = "Migrate.SqlServer.SqlDb" + TaskTypeMigratePointSsis TaskType = "Migrate.Ssis" + TaskTypeMigrateSchemaSqlServerSqlDb TaskType = "MigrateSchemaSqlServerSqlDb" + TaskTypeServicePointCheckPointOCI TaskType = "Service.Check.OCI" + TaskTypeServicePointInstallPointOCI TaskType = "Service.Install.OCI" + TaskTypeServicePointUploadPointOCI TaskType = "Service.Upload.OCI" + TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" 
+ TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync TaskType = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + TaskTypeValidatePointMongoDb TaskType = "Validate.MongoDb" + TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync TaskType = "Validate.Oracle.AzureDbPostgreSql.Sync" +) + +func PossibleValuesForTaskType() []string { + return []string{ + string(TaskTypeConnectPointMongoDb), + string(TaskTypeConnectToSourcePointMySql), + string(TaskTypeConnectToSourcePointOraclePointSync), + string(TaskTypeConnectToSourcePointPostgreSqlPointSync), + string(TaskTypeConnectToSourcePointSqlServer), + string(TaskTypeConnectToSourcePointSqlServerPointSync), + string(TaskTypeConnectToTargetPointAzureDbForMySql), + string(TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointAzureSqlDbMI), + string(TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointSqlDb), + string(TaskTypeConnectToTargetPointSqlDbPointSync), + string(TaskTypeGetTDECertificatesPointSql), + string(TaskTypeGetUserTablesMySql), + string(TaskTypeGetUserTablesOracle), + string(TaskTypeGetUserTablesPointAzureSqlDbPointSync), + string(TaskTypeGetUserTablesPointSql), + string(TaskTypeGetUserTablesPostgreSql), + string(TaskTypeMigratePointMongoDb), + string(TaskTypeMigratePointMySqlPointAzureDbForMySql), + string(TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync), + string(TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMI), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + 
string(TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync), + string(TaskTypeMigratePointSqlServerPointSqlDb), + string(TaskTypeMigratePointSsis), + string(TaskTypeMigrateSchemaSqlServerSqlDb), + string(TaskTypeServicePointCheckPointOCI), + string(TaskTypeServicePointInstallPointOCI), + string(TaskTypeServicePointUploadPointOCI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync), + string(TaskTypeValidatePointMongoDb), + string(TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync), + } +} + +func (s *TaskType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTaskType(input string) (*TaskType, error) { + vals := map[string]TaskType{ + "connect.mongodb": TaskTypeConnectPointMongoDb, + "connecttosource.mysql": TaskTypeConnectToSourcePointMySql, + "connecttosource.oracle.sync": TaskTypeConnectToSourcePointOraclePointSync, + "connecttosource.postgresql.sync": TaskTypeConnectToSourcePointPostgreSqlPointSync, + "connecttosource.sqlserver": TaskTypeConnectToSourcePointSqlServer, + "connecttosource.sqlserver.sync": TaskTypeConnectToSourcePointSqlServerPointSync, + "connecttotarget.azuredbformysql": TaskTypeConnectToTargetPointAzureDbForMySql, + "connecttotarget.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync, + "connecttotarget.azuresqldbmi": TaskTypeConnectToTargetPointAzureSqlDbMI, + "connecttotarget.azuresqldbmi.sync.lrs": TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS, + "connecttotarget.oracle.azuredbforpostgresql.sync": 
TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync, + "connecttotarget.sqldb": TaskTypeConnectToTargetPointSqlDb, + "connecttotarget.sqldb.sync": TaskTypeConnectToTargetPointSqlDbPointSync, + "gettdecertificates.sql": TaskTypeGetTDECertificatesPointSql, + "getusertablesmysql": TaskTypeGetUserTablesMySql, + "getusertablesoracle": TaskTypeGetUserTablesOracle, + "getusertables.azuresqldb.sync": TaskTypeGetUserTablesPointAzureSqlDbPointSync, + "getusertables.sql": TaskTypeGetUserTablesPointSql, + "getusertablespostgresql": TaskTypeGetUserTablesPostgreSql, + "migrate.mongodb": TaskTypeMigratePointMongoDb, + "migrate.mysql.azuredbformysql": TaskTypeMigratePointMySqlPointAzureDbForMySql, + "migrate.mysql.azuredbformysql.sync": TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync, + "migrate.oracle.azuredbforpostgresql.sync": TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync, + "migrate.postgresql.azuredbforpostgresql.syncv2": TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo, + "migrate.sqlserver.azuresqldbmi": TaskTypeMigratePointSqlServerPointAzureSqlDbMI, + "migrate.sqlserver.azuresqldbmi.sync.lrs": TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "migrate.sqlserver.azuresqldb.sync": TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync, + "migrate.sqlserver.sqldb": TaskTypeMigratePointSqlServerPointSqlDb, + "migrate.ssis": TaskTypeMigratePointSsis, + "migrateschemasqlserversqldb": TaskTypeMigrateSchemaSqlServerSqlDb, + "service.check.oci": TaskTypeServicePointCheckPointOCI, + "service.install.oci": TaskTypeServicePointInstallPointOCI, + "service.upload.oci": TaskTypeServicePointUploadPointOCI, + "validatemigrationinput.sqlserver.azuresqldbmi": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI, + "validatemigrationinput.sqlserver.azuresqldbmi.sync.lrs": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "validatemigrationinput.sqlserver.sqldb.sync": 
TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync, + "validate.mongodb": TaskTypeValidatePointMongoDb, + "validate.oracle.azuredbpostgresql.sync": TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskType(input) + return &out, nil +} + +type UpdateActionType string + +const ( + UpdateActionTypeAddedOnTarget UpdateActionType = "AddedOnTarget" + UpdateActionTypeChangedOnTarget UpdateActionType = "ChangedOnTarget" + UpdateActionTypeDeletedOnTarget UpdateActionType = "DeletedOnTarget" +) + +func PossibleValuesForUpdateActionType() []string { + return []string{ + string(UpdateActionTypeAddedOnTarget), + string(UpdateActionTypeChangedOnTarget), + string(UpdateActionTypeDeletedOnTarget), + } +} + +func (s *UpdateActionType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseUpdateActionType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseUpdateActionType(input string) (*UpdateActionType, error) { + vals := map[string]UpdateActionType{ + "addedontarget": UpdateActionTypeAddedOnTarget, + "changedontarget": UpdateActionTypeChangedOnTarget, + "deletedontarget": UpdateActionTypeDeletedOnTarget, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UpdateActionType(input) + return &out, nil +} + +type ValidationStatus string + +const ( + ValidationStatusCompleted ValidationStatus = "Completed" + ValidationStatusCompletedWithIssues ValidationStatus = "CompletedWithIssues" + ValidationStatusDefault ValidationStatus = "Default" + ValidationStatusFailed ValidationStatus = "Failed" + 
ValidationStatusInProgress ValidationStatus = "InProgress" + ValidationStatusInitialized ValidationStatus = "Initialized" + ValidationStatusNotStarted ValidationStatus = "NotStarted" + ValidationStatusStopped ValidationStatus = "Stopped" +) + +func PossibleValuesForValidationStatus() []string { + return []string{ + string(ValidationStatusCompleted), + string(ValidationStatusCompletedWithIssues), + string(ValidationStatusDefault), + string(ValidationStatusFailed), + string(ValidationStatusInProgress), + string(ValidationStatusInitialized), + string(ValidationStatusNotStarted), + string(ValidationStatusStopped), + } +} + +func (s *ValidationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseValidationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseValidationStatus(input string) (*ValidationStatus, error) { + vals := map[string]ValidationStatus{ + "completed": ValidationStatusCompleted, + "completedwithissues": ValidationStatusCompletedWithIssues, + "default": ValidationStatusDefault, + "failed": ValidationStatusFailed, + "inprogress": ValidationStatusInProgress, + "initialized": ValidationStatusInitialized, + "notstarted": ValidationStatusNotStarted, + "stopped": ValidationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ValidationStatus(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/id_file.go b/resource-manager/datamigration/2025-06-30/post/id_file.go new file mode 100644 index 00000000000..4715f5fe971 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/id_file.go @@ -0,0 +1,148 @@ +package post + +import ( + "fmt" + "strings" + + 
"github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&FileId{}) +} + +var _ resourceids.ResourceId = &FileId{} + +// FileId is a struct representing the Resource ID for a File +type FileId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + FileName string +} + +// NewFileID returns a new FileId struct +func NewFileID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, fileName string) FileId { + return FileId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + FileName: fileName, + } +} + +// ParseFileID parses 'input' into a FileId +func ParseFileID(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseFileIDInsensitively parses 'input' case-insensitively into a FileId +// note: this method should only be used for API response data and not user input +func ParseFileIDInsensitively(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *FileId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok 
= input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.FileName, ok = input.Parsed["fileName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "fileName", input) + } + + return nil +} + +// ValidateFileID checks that 'input' can be parsed as a File ID +func ValidateFileID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseFileID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted File ID +func (id FileId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/files/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.FileName) +} + +// Segments returns a slice of Resource ID Segments which comprise this File ID +func (id FileId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + 
resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticFiles", "files", "files"), + resourceids.UserSpecifiedSegment("fileName", "fileName"), + } +} + +// String returns a human-readable description of this File ID +func (id FileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("File Name: %q", id.FileName), + } + return fmt.Sprintf("File (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/post/id_file_test.go b/resource-manager/datamigration/2025-06-30/post/id_file_test.go new file mode 100644 index 00000000000..2bb4280e7cc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/id_file_test.go @@ -0,0 +1,372 @@ +package post + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &FileId{} + +func TestNewFileID(t *testing.T) { + id := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.FileName != "fileName" { + t.Fatalf("Expected %q but got %q for Segment 'FileName'", id.FileName, "fileName") + } +} + +func TestFormatFileID(t *testing.T) { + actual := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseFileID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestParseFileIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + FileName: "fIlEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestSegmentsForFileId(t *testing.T) { + segments := FileId{}.Segments() + if len(segments) == 0 { + t.Fatalf("FileId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/post/id_location.go b/resource-manager/datamigration/2025-06-30/post/id_location.go new file mode 100644 index 00000000000..4df2f624f22 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/id_location.go @@ -0,0 +1,121 @@ +package post + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&LocationId{}) +} + +var _ resourceids.ResourceId = &LocationId{} + +// LocationId is a struct representing the Resource ID for a Location +type LocationId struct { + SubscriptionId string + LocationName string +} + +// NewLocationID returns a new LocationId struct +func NewLocationID(subscriptionId string, locationName string) LocationId { + return LocationId{ + SubscriptionId: subscriptionId, + LocationName: locationName, + } +} + +// ParseLocationID parses 'input' into a LocationId +func ParseLocationID(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseLocationIDInsensitively parses 'input' case-insensitively into a LocationId +// note: this method should only be used for API response data and not user input +func ParseLocationIDInsensitively(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *LocationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.LocationName, ok = input.Parsed["locationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "locationName", input) + } + + return nil +} + +// ValidateLocationID checks that 'input' can be parsed as a Location ID +func ValidateLocationID(input interface{}, key string) 
(warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseLocationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Location ID +func (id LocationId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.DataMigration/locations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.LocationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Location ID +func (id LocationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticLocations", "locations", "locations"), + resourceids.UserSpecifiedSegment("locationName", "locationName"), + } +} + +// String returns a human-readable description of this Location ID +func (id LocationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location Name: %q", id.LocationName), + } + return fmt.Sprintf("Location (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/post/id_location_test.go b/resource-manager/datamigration/2025-06-30/post/id_location_test.go new file mode 100644 index 00000000000..e22dda8e247 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/id_location_test.go @@ -0,0 +1,237 @@ +package post + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &LocationId{} + +func TestNewLocationID(t *testing.T) { + id := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.LocationName != "locationName" { + t.Fatalf("Expected %q but got %q for Segment 'LocationName'", id.LocationName, "locationName") + } +} + +func TestFormatLocationID(t *testing.T) { + actual := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseLocationID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: 
"locationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestParseLocationIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/lOcAtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/lOcAtIoNs/lOcAtIoNnAmE", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "lOcAtIoNnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/lOcAtIoNs/lOcAtIoNnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + 
t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestSegmentsForLocationId(t *testing.T) { + segments := LocationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("LocationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/post/id_service.go b/resource-manager/datamigration/2025-06-30/post/id_service.go new file mode 100644 index 00000000000..0bce91c9fd5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/id_service.go @@ -0,0 +1,130 @@ +package post + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ServiceId{}) +} + +var _ resourceids.ResourceId = &ServiceId{} + +// ServiceId is a struct representing the Resource ID for a Service +type ServiceId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string +} + +// NewServiceID returns a new ServiceId struct +func NewServiceID(subscriptionId string, resourceGroupName string, serviceName string) ServiceId { + return ServiceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + } +} + +// ParseServiceID parses 'input' into a ServiceId +func ParseServiceID(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceIDInsensitively parses 'input' case-insensitively into a ServiceId +// note: this method should only be used for API response data and not user input +func ParseServiceIDInsensitively(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + return nil +} + +// ValidateServiceID checks that 'input' can be parsed as a Service ID +func ValidateServiceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service ID +func (id ServiceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service ID +func (id ServiceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + } +} + +// String returns a human-readable description of this Service ID +func (id ServiceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + } + return fmt.Sprintf("Service (%s)", strings.Join(components, "\n")) +} diff --git 
a/resource-manager/datamigration/2025-06-30/post/id_service_test.go b/resource-manager/datamigration/2025-06-30/post/id_service_test.go new file mode 100644 index 00000000000..bc8ef3953bb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/id_service_test.go @@ -0,0 +1,282 @@ +package post + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &ServiceId{} + +func TestNewServiceID(t *testing.T) { + id := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } +} + +func TestFormatServiceID(t *testing.T) { + actual := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseServiceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", 
v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestParseServiceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI 
(mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a 
value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestSegmentsForServiceId(t *testing.T) { + segments := ServiceId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/post/id_servicetask.go b/resource-manager/datamigration/2025-06-30/post/id_servicetask.go new file mode 100644 index 00000000000..44e6d4cf22a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/id_servicetask.go @@ -0,0 +1,139 @@ +package post + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ServiceTaskId{}) +} + +var _ resourceids.ResourceId = &ServiceTaskId{} + +// ServiceTaskId is a struct representing the Resource ID for a Service Task +type ServiceTaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ServiceTaskName string +} + +// NewServiceTaskID returns a new ServiceTaskId struct +func NewServiceTaskID(subscriptionId string, resourceGroupName string, serviceName string, serviceTaskName string) ServiceTaskId { + return ServiceTaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ServiceTaskName: serviceTaskName, + } +} + +// ParseServiceTaskID parses 'input' into a ServiceTaskId +func ParseServiceTaskID(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceTaskIDInsensitively parses 'input' case-insensitively into a ServiceTaskId +// note: this method should only be used for API response data and not user input +func ParseServiceTaskIDInsensitively(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceTaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = 
input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ServiceTaskName, ok = input.Parsed["serviceTaskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceTaskName", input) + } + + return nil +} + +// ValidateServiceTaskID checks that 'input' can be parsed as a Service Task ID +func ValidateServiceTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service Task ID +func (id ServiceTaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/serviceTasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ServiceTaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service Task ID +func (id ServiceTaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + 
resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticServiceTasks", "serviceTasks", "serviceTasks"), + resourceids.UserSpecifiedSegment("serviceTaskName", "serviceTaskName"), + } +} + +// String returns a human-readable description of this Service Task ID +func (id ServiceTaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Service Task Name: %q", id.ServiceTaskName), + } + return fmt.Sprintf("Service Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/post/id_servicetask_test.go b/resource-manager/datamigration/2025-06-30/post/id_servicetask_test.go new file mode 100644 index 00000000000..fe8a3fcba22 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/id_servicetask_test.go @@ -0,0 +1,327 @@ +package post + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &ServiceTaskId{} + +func TestNewServiceTaskID(t *testing.T) { + id := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ServiceTaskName != "serviceTaskName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceTaskName'", id.ServiceTaskName, "serviceTaskName") + } +} + +func TestFormatServiceTaskID(t *testing.T) { + actual := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseServiceTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // 
Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", 
v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestParseServiceTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI 
(mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ServiceTaskName: "sErViCeTaSkNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", 
v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestSegmentsForServiceTaskId(t *testing.T) { + segments := ServiceTaskId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceTaskId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/post/id_task.go b/resource-manager/datamigration/2025-06-30/post/id_task.go new file mode 100644 index 00000000000..fd9ef4542ab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/id_task.go @@ -0,0 +1,148 @@ +package post + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&TaskId{}) +} + +var _ resourceids.ResourceId = &TaskId{} + +// TaskId is a struct representing the Resource ID for a Task +type TaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + TaskName string +} + +// NewTaskID returns a new TaskId struct +func NewTaskID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, taskName string) TaskId { + return TaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + TaskName: taskName, + } +} + +// ParseTaskID parses 'input' into a TaskId +func ParseTaskID(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseTaskIDInsensitively parses 'input' case-insensitively into a TaskId +// note: this method should only be used for API response data and not user input +func ParseTaskIDInsensitively(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *TaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if 
id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.TaskName, ok = input.Parsed["taskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "taskName", input) + } + + return nil +} + +// ValidateTaskID checks that 'input' can be parsed as a Task ID +func ValidateTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Task ID +func (id TaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/tasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.TaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Task ID +func (id TaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + 
resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticTasks", "tasks", "tasks"), + resourceids.UserSpecifiedSegment("taskName", "taskName"), + } +} + +// String returns a human-readable description of this Task ID +func (id TaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("Task Name: %q", id.TaskName), + } + return fmt.Sprintf("Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/post/id_task_test.go b/resource-manager/datamigration/2025-06-30/post/id_task_test.go new file mode 100644 index 00000000000..6a7551d16b8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/id_task_test.go @@ -0,0 +1,372 @@ +package post + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &TaskId{} + +func TestNewTaskID(t *testing.T) { + id := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.TaskName != "taskName" { + t.Fatalf("Expected %q but got %q for Segment 'TaskName'", id.TaskName, "taskName") + } +} + +func TestFormatTaskID(t *testing.T) { + actual := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.TaskName != v.Expected.TaskName { + t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName) + } + + } +} + +func TestParseTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + TaskName: "tAsKnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.TaskName != v.Expected.TaskName { + t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName) + } + + } +} + +func TestSegmentsForTaskId(t *testing.T) { + segments := TaskId{}.Segments() + if len(segments) == 0 { + t.Fatalf("TaskId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/post/method_filesread.go b/resource-manager/datamigration/2025-06-30/post/method_filesread.go new file mode 100644 index 00000000000..864032cd83f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/method_filesread.go @@ -0,0 +1,54 @@ +package post + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesReadOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *FileStorageInfo +} + +// FilesRead ... 
+func (c POSTClient) FilesRead(ctx context.Context, id FileId) (result FilesReadOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/read", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model FileStorageInfo + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/post/method_filesreadwrite.go b/resource-manager/datamigration/2025-06-30/post/method_filesreadwrite.go new file mode 100644 index 00000000000..ec9ef6445bc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/method_filesreadwrite.go @@ -0,0 +1,54 @@ +package post + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesReadWriteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *FileStorageInfo +} + +// FilesReadWrite ... 
+func (c POSTClient) FilesReadWrite(ctx context.Context, id FileId) (result FilesReadWriteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/readwrite", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model FileStorageInfo + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/post/method_servicescheckchildrennameavailability.go b/resource-manager/datamigration/2025-06-30/post/method_servicescheckchildrennameavailability.go new file mode 100644 index 00000000000..f17f5114bc1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/method_servicescheckchildrennameavailability.go @@ -0,0 +1,58 @@ +package post + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesCheckChildrenNameAvailabilityOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *NameAvailabilityResponse +} + +// ServicesCheckChildrenNameAvailability ... 
+func (c POSTClient) ServicesCheckChildrenNameAvailability(ctx context.Context, id ServiceId, input NameAvailabilityRequest) (result ServicesCheckChildrenNameAvailabilityOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/checkNameAvailability", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model NameAvailabilityResponse + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/post/method_serviceschecknameavailability.go b/resource-manager/datamigration/2025-06-30/post/method_serviceschecknameavailability.go new file mode 100644 index 00000000000..8617015e25a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/method_serviceschecknameavailability.go @@ -0,0 +1,58 @@ +package post + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesCheckNameAvailabilityOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *NameAvailabilityResponse +} + +// ServicesCheckNameAvailability ... 
+func (c POSTClient) ServicesCheckNameAvailability(ctx context.Context, id LocationId, input NameAvailabilityRequest) (result ServicesCheckNameAvailabilityOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/checkNameAvailability", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model NameAvailabilityResponse + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/post/method_servicescheckstatus.go b/resource-manager/datamigration/2025-06-30/post/method_servicescheckstatus.go new file mode 100644 index 00000000000..2b883a894f7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/method_servicescheckstatus.go @@ -0,0 +1,54 @@ +package post + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesCheckStatusOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DataMigrationServiceStatusResponse +} + +// ServicesCheckStatus ... 
+func (c POSTClient) ServicesCheckStatus(ctx context.Context, id ServiceId) (result ServicesCheckStatusOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/checkStatus", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DataMigrationServiceStatusResponse + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/post/method_servicesstart.go b/resource-manager/datamigration/2025-06-30/post/method_servicesstart.go new file mode 100644 index 00000000000..7b519e83a1a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/method_servicesstart.go @@ -0,0 +1,70 @@ +package post + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesStartOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// ServicesStart ... 
+func (c POSTClient) ServicesStart(ctx context.Context, id ServiceId) (result ServicesStartOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/start", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesStartThenPoll performs ServicesStart then polls until it's completed +func (c POSTClient) ServicesStartThenPoll(ctx context.Context, id ServiceId) error { + result, err := c.ServicesStart(ctx, id) + if err != nil { + return fmt.Errorf("performing ServicesStart: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesStart: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/method_servicesstop.go b/resource-manager/datamigration/2025-06-30/post/method_servicesstop.go new file mode 100644 index 00000000000..f2d2c38a164 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/method_servicesstop.go @@ -0,0 +1,70 @@ +package post + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServicesStopOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// ServicesStop ... +func (c POSTClient) ServicesStop(ctx context.Context, id ServiceId) (result ServicesStopOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/stop", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesStopThenPoll performs ServicesStop then polls until it's completed +func (c POSTClient) ServicesStopThenPoll(ctx context.Context, id ServiceId) error { + result, err := c.ServicesStop(ctx, id) + if err != nil { + return fmt.Errorf("performing ServicesStop: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesStop: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/method_servicetaskscancel.go b/resource-manager/datamigration/2025-06-30/post/method_servicetaskscancel.go new file mode 100644 index 00000000000..cc350ec901d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/method_servicetaskscancel.go @@ -0,0 +1,54 @@ +package post + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServiceTasksCancelOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// ServiceTasksCancel ... +func (c POSTClient) ServiceTasksCancel(ctx context.Context, id ServiceTaskId) (result ServiceTasksCancelOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/cancel", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/post/method_taskscancel.go b/resource-manager/datamigration/2025-06-30/post/method_taskscancel.go new file mode 100644 index 00000000000..0508294ce8e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/method_taskscancel.go @@ -0,0 +1,54 @@ +package post + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksCancelOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// TasksCancel ... 
+func (c POSTClient) TasksCancel(ctx context.Context, id TaskId) (result TasksCancelOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/cancel", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/post/method_taskscommand.go b/resource-manager/datamigration/2025-06-30/post/method_taskscommand.go new file mode 100644 index 00000000000..7d03184a39a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/method_taskscommand.go @@ -0,0 +1,63 @@ +package post + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksCommandOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model CommandProperties +} + +// TasksCommand ... 
+func (c POSTClient) TasksCommand(ctx context.Context, id TaskId, input CommandProperties) (result TasksCommandOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/command", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var respObj json.RawMessage + if err = resp.Unmarshal(&respObj); err != nil { + return + } + model, err := UnmarshalCommandPropertiesImplementation(respObj) + if err != nil { + return + } + result.Model = model + + return +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_azureactivedirectoryapp.go b/resource-manager/datamigration/2025-06-30/post/model_azureactivedirectoryapp.go new file mode 100644 index 00000000000..88e142efff4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_azureactivedirectoryapp.go @@ -0,0 +1,11 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AzureActiveDirectoryApp struct { + AppKey *string `json:"appKey,omitempty"` + ApplicationId *string `json:"applicationId,omitempty"` + IgnoreAzurePermissions *bool `json:"ignoreAzurePermissions,omitempty"` + TenantId *string `json:"tenantId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_backupfileinfo.go b/resource-manager/datamigration/2025-06-30/post/model_backupfileinfo.go new file mode 100644 index 00000000000..8a3d7a5a1ff --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_backupfileinfo.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupFileInfo struct { + FamilySequenceNumber *int64 `json:"familySequenceNumber,omitempty"` + FileLocation *string `json:"fileLocation,omitempty"` + Status *BackupFileStatus `json:"status,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_backupsetinfo.go b/resource-manager/datamigration/2025-06-30/post/model_backupsetinfo.go new file mode 100644 index 00000000000..cab42ab8799 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_backupsetinfo.go @@ -0,0 +1,59 @@ +package post + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BackupSetInfo struct { + BackupFinishedDate *string `json:"backupFinishedDate,omitempty"` + BackupSetId *string `json:"backupSetId,omitempty"` + BackupStartDate *string `json:"backupStartDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FirstLsn *string `json:"firstLsn,omitempty"` + IsBackupRestored *bool `json:"isBackupRestored,omitempty"` + LastLsn *string `json:"lastLsn,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + ListOfBackupFiles *[]BackupFileInfo `json:"listOfBackupFiles,omitempty"` +} + +func (o *BackupSetInfo) GetBackupFinishedDateAsTime() (*time.Time, error) { + if o.BackupFinishedDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishedDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupFinishedDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishedDate = &formatted +} + +func (o *BackupSetInfo) GetBackupStartDateAsTime() (*time.Time, error) { + if o.BackupStartDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupStartDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupStartDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupStartDate = &formatted +} + +func (o *BackupSetInfo) GetLastModifiedTimeAsTime() (*time.Time, error) { + if o.LastModifiedTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastModifiedTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetLastModifiedTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModifiedTime = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_blobshare.go b/resource-manager/datamigration/2025-06-30/post/model_blobshare.go new file mode 100644 index 00000000000..4c2376d4880 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/post/model_blobshare.go @@ -0,0 +1,8 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BlobShare struct { + SasUri *string `json:"sasUri,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_commandproperties.go b/resource-manager/datamigration/2025-06-30/post/model_commandproperties.go new file mode 100644 index 00000000000..9b1b174e0a0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_commandproperties.go @@ -0,0 +1,85 @@ +package post + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CommandProperties interface { + CommandProperties() BaseCommandPropertiesImpl +} + +var _ CommandProperties = BaseCommandPropertiesImpl{} + +type BaseCommandPropertiesImpl struct { + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s BaseCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s +} + +var _ CommandProperties = RawCommandPropertiesImpl{} + +// RawCommandPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawCommandPropertiesImpl struct { + commandProperties BaseCommandPropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s.commandProperties +} + +func UnmarshalCommandPropertiesImplementation(input []byte) (CommandProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling CommandProperties into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["commandType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureDbSqlMi.Complete") { + var out MigrateMISyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMISyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Sync.Complete.Database") { + var out MigrateSyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + var parent BaseCommandPropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseCommandPropertiesImpl: %+v", err) + } + + return RawCommandPropertiesImpl{ + commandProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connectioninfo.go b/resource-manager/datamigration/2025-06-30/post/model_connectioninfo.go new file mode 100644 index 00000000000..700e7f47af0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connectioninfo.go @@ -0,0 +1,117 @@ +package post + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectionInfo interface { + ConnectionInfo() BaseConnectionInfoImpl +} + +var _ ConnectionInfo = BaseConnectionInfoImpl{} + +type BaseConnectionInfoImpl struct { + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s BaseConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s +} + +var _ ConnectionInfo = RawConnectionInfoImpl{} + +// RawConnectionInfoImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectionInfoImpl struct { + connectionInfo BaseConnectionInfoImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s.connectionInfo +} + +func UnmarshalConnectionInfoImplementation(input []byte) (ConnectionInfo, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectionInfo into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["type"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MiSqlConnectionInfo") { + var out MiSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MiSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MongoDbConnectionInfo") { + var out MongoDbConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbConnectionInfo: %+v", err) + } + return out, nil + } + + if 
strings.EqualFold(value, "MySqlConnectionInfo") { + var out MySqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MySqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "OracleConnectionInfo") { + var out OracleConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into OracleConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "PostgreSqlConnectionInfo") { + var out PostgreSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into PostgreSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SqlConnectionInfo") { + var out SqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into SqlConnectionInfo: %+v", err) + } + return out, nil + } + + var parent BaseConnectionInfoImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectionInfoImpl: %+v", err) + } + + return RawConnectionInfoImpl{ + connectionInfo: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttomongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_connecttomongodbtaskproperties.go new file mode 100644 index 00000000000..97dfa4edfbc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttomongodbtaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToMongoDbTaskProperties{} + +type ConnectToMongoDbTaskProperties struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToMongoDbTaskProperties{} + +func (s ConnectToMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Connect.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToMongoDbTaskProperties{} + +func (s *ConnectToMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType 
`json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcemysqltaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcemysqltaskinput.go new file mode 100644 index 00000000000..d9700c17144 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcemysqltaskinput.go @@ -0,0 +1,11 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceMySqlTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetPlatform *MySqlTargetPlatformType `json:"targetPlatform,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcemysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcemysqltaskproperties.go new file mode 100644 index 00000000000..019b710c93a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcemysqltaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceMySqlTaskProperties{} + +type ConnectToSourceMySqlTaskProperties struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceMySqlTaskProperties{} + +func (s ConnectToSourceMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceMySqlTaskProperties + wrapped := wrapper(s) + 
encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.MySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceMySqlTaskProperties{} + +func (s *ConnectToSourceMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 
'ConnectToSourceMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcenonsqltaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcenonsqltaskoutput.go new file mode 100644 index 00000000000..897f6b9adde --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcenonsqltaskoutput.go @@ -0,0 +1,12 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceNonSqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerProperties *ServerProperties `json:"serverProperties,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourceoraclesynctaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourceoraclesynctaskinput.go new file mode 100644 index 00000000000..1cf73cb686d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourceoraclesynctaskinput.go @@ -0,0 +1,8 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceOracleSyncTaskInput struct { + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourceoraclesynctaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourceoraclesynctaskoutput.go new file mode 100644 index 00000000000..e60686ae97a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourceoraclesynctaskoutput.go @@ -0,0 +1,11 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceOracleSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourceoraclesynctaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourceoraclesynctaskproperties.go new file mode 100644 index 00000000000..9237fbba58b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourceoraclesynctaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToSourceOracleSyncTaskProperties{} + +type ConnectToSourceOracleSyncTaskProperties struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceOracleSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceOracleSyncTaskProperties{} + +func (s ConnectToSourceOracleSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceOracleSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.Oracle.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceOracleSyncTaskProperties{} + +func (s *ConnectToSourceOracleSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + 
ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceOracleSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcepostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcepostgresqlsynctaskinput.go new file mode 100644 index 00000000000..7de22cd2a10 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcepostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourcePostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcepostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcepostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..6127f59fe7f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcepostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourcePostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcepostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcepostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..2afafbbfd23 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcepostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToSourcePostgreSqlSyncTaskProperties{} + +type ConnectToSourcePostgreSqlSyncTaskProperties struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourcePostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.PostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s *ConnectToSourcePostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output 
*[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourcePostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlserversynctaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlserversynctaskproperties.go new file mode 100644 index 00000000000..f216a1b82ba --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlserversynctaskproperties.go @@ -0,0 +1,121 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToSourceSqlServerSyncTaskProperties{} + +type ConnectToSourceSqlServerSyncTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerSyncTaskProperties{} + +func (s ConnectToSourceSqlServerSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.SqlServer.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerSyncTaskProperties{} + +func (s *ConnectToSourceSqlServerSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + ClientData *map[string]string 
`json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskinput.go 
b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskinput.go new file mode 100644 index 00000000000..f5eaa3df916 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskinput.go @@ -0,0 +1,15 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceSqlServerTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectDatabases *bool `json:"collectDatabases,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + CollectTdeCertificateInfo *bool `json:"collectTdeCertificateInfo,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutput.go new file mode 100644 index 00000000000..73959673667 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutput.go @@ -0,0 +1,100 @@ +package post + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceSqlServerTaskOutput interface { + ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl +} + +var _ ConnectToSourceSqlServerTaskOutput = BaseConnectToSourceSqlServerTaskOutputImpl{} + +type BaseConnectToSourceSqlServerTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s +} + +var _ ConnectToSourceSqlServerTaskOutput = RawConnectToSourceSqlServerTaskOutputImpl{} + +// RawConnectToSourceSqlServerTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectToSourceSqlServerTaskOutputImpl struct { + connectToSourceSqlServerTaskOutput BaseConnectToSourceSqlServerTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s.connectToSourceSqlServerTaskOutput +} + +func UnmarshalConnectToSourceSqlServerTaskOutputImplementation(input []byte) (ConnectToSourceSqlServerTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TaskLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputTaskLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + return out, nil + } + + var parent BaseConnectToSourceSqlServerTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectToSourceSqlServerTaskOutputImpl: %+v", err) + } + + return RawConnectToSourceSqlServerTaskOutputImpl{ + connectToSourceSqlServerTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutputagentjoblevel.go new file mode 100644 index 00000000000..f347c3dc244 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +type ConnectToSourceSqlServerTaskOutputAgentJobLevel struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + JobCategory *string `json:"jobCategory,omitempty"` + JobOwner *string `json:"jobOwner,omitempty"` + LastExecutedOn *string `json:"lastExecutedOn,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutputdatabaselevel.go new file mode 100644 index 00000000000..77e15308763 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutputdatabaselevel.go @@ -0,0 +1,56 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +type ConnectToSourceSqlServerTaskOutputDatabaseLevel struct { + CompatibilityLevel *DatabaseCompatLevel `json:"compatibilityLevel,omitempty"` + DatabaseFiles *[]DatabaseFileInfo `json:"databaseFiles,omitempty"` + DatabaseState *DatabaseState `json:"databaseState,omitempty"` + Name *string `json:"name,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = 
"DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutputloginlevel.go new file mode 100644 index 00000000000..d680d3ff733 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutputloginlevel.go @@ -0,0 +1,56 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +type ConnectToSourceSqlServerTaskOutputLoginLevel struct { + DefaultDatabase *string `json:"defaultDatabase,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + LoginType *LoginType `json:"loginType,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputLoginLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutputtasklevel.go b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutputtasklevel.go new file mode 100644 index 00000000000..1249faffe10 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskoutputtasklevel.go @@ -0,0 +1,58 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +type ConnectToSourceSqlServerTaskOutputTaskLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + DatabaseTdeCertificateMapping *map[string]string `json:"databaseTdeCertificateMapping,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputTaskLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + decoded["resultType"] = "TaskLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskproperties.go 
b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskproperties.go new file mode 100644 index 00000000000..56ec06ef5e1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttosourcesqlservertaskproperties.go @@ -0,0 +1,124 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerTaskProperties{} + +type ConnectToSourceSqlServerTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskProperties{} + +func (s ConnectToSourceSqlServerTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + decoded["taskType"] = 
"ConnectToSource.SqlServer" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerTaskProperties{} + +func (s *ConnectToSourceSqlServerTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := 
make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbformysqltaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbformysqltaskinput.go new file mode 100644 index 00000000000..d241fb678ab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbformysqltaskinput.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForMySqlTaskInput struct { + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbformysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbformysqltaskoutput.go new file mode 100644 index 00000000000..078fe8840fe --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbformysqltaskoutput.go @@ -0,0 +1,12 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForMySqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbformysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbformysqltaskproperties.go new file mode 100644 index 00000000000..3639ff5aaae --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbformysqltaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetAzureDbForMySqlTaskProperties{} + +type ConnectToTargetAzureDbForMySqlTaskProperties struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) 
MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s *ConnectToTargetAzureDbForMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := 
make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..3426f880f5d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,9 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..4f373b1417b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..79bc3447e2a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..9f1c4461440 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ +package post + +// Copyright 
(c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput struct { + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..4f91df1d35b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput struct { + DatabaseSchemaMap *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined `json:"databaseSchemaMap,omitempty"` + Databases *[]string `json:"databases,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go new file mode 100644 index 00000000000..6a715cda999 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go @@ -0,0 +1,9 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined struct { + Database *string `json:"database,omitempty"` + Schemas *[]string `json:"schemas,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..9fa68294ed7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} 
+ +func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if 
v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqldbtaskinput.go new file mode 100644 index 00000000000..75153342fd2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqldbtaskinput.go @@ -0,0 +1,9 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlDbTaskInput struct { + QueryObjectCounts *bool `json:"queryObjectCounts,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqldbtaskoutput.go new file mode 100644 index 00000000000..eecd138e26d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqldbtaskoutput.go @@ -0,0 +1,11 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlDbTaskOutput struct { + Databases *map[string]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqldbtaskproperties.go new file mode 100644 index 00000000000..40f0420b3cd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqldbtaskproperties.go @@ -0,0 +1,109 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlDbTaskProperties{} + +type ConnectToTargetSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlDbTaskProperties{} + +func (s ConnectToTargetSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlDbTaskProperties + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlDbTaskProperties{} + +func (s *ConnectToTargetSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + 
return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqlmisynctaskinput.go new file mode 100644 index 00000000000..1d1606687d9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqlmisynctaskinput.go @@ -0,0 +1,9 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqlmisynctaskoutput.go new file mode 100644 index 00000000000..0884b58aad7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_connecttotargetsqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// ConnectToTargetSqlMISyncTaskOutput is a single result entry produced by the
// task described by ConnectToTargetSqlMISyncTaskProperties.
type ConnectToTargetSqlMISyncTaskOutput struct {
	TargetServerBrandVersion *string                `json:"targetServerBrandVersion,omitempty"`
	TargetServerVersion      *string                `json:"targetServerVersion,omitempty"`
	ValidationErrors         *[]ReportableException `json:"validationErrors,omitempty"`
}

var _ ProjectTaskProperties = ConnectToTargetSqlMISyncTaskProperties{}

// ConnectToTargetSqlMISyncTaskProperties is the ProjectTaskProperties
// implementation serialized with the discriminator
// taskType == "ConnectToTarget.AzureSqlDbMI.Sync.LRS" (see MarshalJSON).
type ConnectToTargetSqlMISyncTaskProperties struct {
	Input  *ConnectToTargetSqlMISyncTaskInput    `json:"input,omitempty"`
	Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the fields shared with the base
// ProjectTaskProperties type as a BaseProjectTaskPropertiesImpl.
func (s ConnectToTargetSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToTargetSqlMISyncTaskProperties{}

// MarshalJSON implements json.Marshaler, encoding the struct and then forcing
// the `taskType` discriminator so the payload always identifies this type.
func (s ConnectToTargetSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The local `wrapper` alias has the same layout but none of the methods,
	// so the inner json.Marshal does not recurse back into this MarshalJSON.
	type wrapper ConnectToTargetSqlMISyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err)
	}

	// Overwrite (or inject) the discriminator unconditionally.
	decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI.Sync.LRS"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToTargetSqlMISyncTaskProperties{}

// UnmarshalJSON implements json.Unmarshaler. Concretely-typed fields are
// decoded directly; the polymorphic `commands` list is handled in a second
// pass via UnmarshalCommandPropertiesImplementation.
func (s *ConnectToTargetSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	// First pass: decode everything except the polymorphic `commands` field.
	var decoded struct {
		Input      *ConnectToTargetSqlMISyncTaskInput    `json:"input,omitempty"`
		Output     *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                    `json:"clientData,omitempty"`
		Errors     *[]ODataError                         `json:"errors,omitempty"`
		State      *TaskState                            `json:"state,omitempty"`
		TaskType   TaskType                              `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: re-read the raw JSON so each `commands` element can be
	// resolved to its concrete CommandProperties implementation.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMISyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}

// ConnectToTargetSqlMITaskInput is the input payload referenced by
// ConnectToTargetSqlMITaskProperties; `targetConnectionInfo` is required,
// the remaining fields are optional.
type ConnectToTargetSqlMITaskInput struct {
	CollectAgentJobs        *bool             `json:"collectAgentJobs,omitempty"`
	CollectLogins           *bool             `json:"collectLogins,omitempty"`
	TargetConnectionInfo    SqlConnectionInfo `json:"targetConnectionInfo"`
	ValidateSsisCatalogOnly *bool             `json:"validateSsisCatalogOnly,omitempty"`
}
// ConnectToTargetSqlMITaskOutput is a single result entry produced by the
// task described by ConnectToTargetSqlMITaskProperties.
type ConnectToTargetSqlMITaskOutput struct {
	AgentJobs                *[]string              `json:"agentJobs,omitempty"`
	Id                       *string                `json:"id,omitempty"`
	Logins                   *[]string              `json:"logins,omitempty"`
	TargetServerBrandVersion *string                `json:"targetServerBrandVersion,omitempty"`
	TargetServerVersion      *string                `json:"targetServerVersion,omitempty"`
	ValidationErrors         *[]ReportableException `json:"validationErrors,omitempty"`
}

var _ ProjectTaskProperties = ConnectToTargetSqlMITaskProperties{}

// ConnectToTargetSqlMITaskProperties is the ProjectTaskProperties
// implementation serialized with the discriminator
// taskType == "ConnectToTarget.AzureSqlDbMI" (see MarshalJSON).
type ConnectToTargetSqlMITaskProperties struct {
	Input  *ConnectToTargetSqlMITaskInput    `json:"input,omitempty"`
	Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the fields shared with the base
// ProjectTaskProperties type as a BaseProjectTaskPropertiesImpl.
func (s ConnectToTargetSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToTargetSqlMITaskProperties{}

// MarshalJSON implements json.Marshaler, encoding the struct and then forcing
// the `taskType` discriminator so the payload always identifies this type.
func (s ConnectToTargetSqlMITaskProperties) MarshalJSON() ([]byte, error) {
	// The local `wrapper` alias has the same layout but none of the methods,
	// so the inner json.Marshal does not recurse back into this MarshalJSON.
	type wrapper ConnectToTargetSqlMITaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToTargetSqlMITaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties: %+v", err)
	}

	// Overwrite (or inject) the discriminator unconditionally.
	decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMITaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToTargetSqlMITaskProperties{}

// UnmarshalJSON implements json.Unmarshaler. Concretely-typed fields are
// decoded directly; the polymorphic `commands` list is handled in a second
// pass via UnmarshalCommandPropertiesImplementation.
func (s *ConnectToTargetSqlMITaskProperties) UnmarshalJSON(bytes []byte) error {
	// First pass: decode everything except the polymorphic `commands` field.
	var decoded struct {
		Input      *ConnectToTargetSqlMITaskInput    `json:"input,omitempty"`
		Output     *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                `json:"clientData,omitempty"`
		Errors     *[]ODataError                     `json:"errors,omitempty"`
		State      *TaskState                        `json:"state,omitempty"`
		TaskType   TaskType                          `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: re-read the raw JSON so each `commands` element can be
	// resolved to its concrete CommandProperties implementation.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMITaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}

// ConnectToTargetSqlSqlDbSyncTaskInput is the input payload referenced by
// ConnectToTargetSqlSqlDbSyncTaskProperties; both fields are required.
type ConnectToTargetSqlSqlDbSyncTaskInput struct {
	SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"`
	TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"`
}
var _ ProjectTaskProperties = ConnectToTargetSqlSqlDbSyncTaskProperties{}

// ConnectToTargetSqlSqlDbSyncTaskProperties is the ProjectTaskProperties
// implementation serialized with the discriminator
// taskType == "ConnectToTarget.SqlDb.Sync" (see MarshalJSON).
// Note: its Output reuses ConnectToTargetSqlDbTaskOutput rather than
// defining a sync-specific output type.
type ConnectToTargetSqlSqlDbSyncTaskProperties struct {
	Input  *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"`
	Output *[]ConnectToTargetSqlDbTaskOutput     `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the fields shared with the base
// ProjectTaskProperties type as a BaseProjectTaskPropertiesImpl.
func (s ConnectToTargetSqlSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToTargetSqlSqlDbSyncTaskProperties{}

// MarshalJSON implements json.Marshaler, encoding the struct and then forcing
// the `taskType` discriminator so the payload always identifies this type.
func (s ConnectToTargetSqlSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The local `wrapper` alias has the same layout but none of the methods,
	// so the inner json.Marshal does not recurse back into this MarshalJSON.
	type wrapper ConnectToTargetSqlSqlDbSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err)
	}

	// Overwrite (or inject) the discriminator unconditionally.
	decoded["taskType"] = "ConnectToTarget.SqlDb.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToTargetSqlSqlDbSyncTaskProperties{}

// UnmarshalJSON implements json.Unmarshaler. Concretely-typed fields are
// decoded directly; the polymorphic `commands` list is handled in a second
// pass via UnmarshalCommandPropertiesImplementation.
func (s *ConnectToTargetSqlSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	// First pass: decode everything except the polymorphic `commands` field.
	var decoded struct {
		Input      *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"`
		Output     *[]ConnectToTargetSqlDbTaskOutput     `json:"output,omitempty"`
		ClientData *map[string]string                    `json:"clientData,omitempty"`
		Errors     *[]ODataError                         `json:"errors,omitempty"`
		State      *TaskState                            `json:"state,omitempty"`
		TaskType   TaskType                              `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: re-read the raw JSON so each `commands` element can be
	// resolved to its concrete CommandProperties implementation.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlSqlDbSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
+ +type DatabaseBackupInfo struct { + BackupFiles *[]string `json:"backupFiles,omitempty"` + BackupFinishDate *string `json:"backupFinishDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FamilyCount *int64 `json:"familyCount,omitempty"` + IsCompressed *bool `json:"isCompressed,omitempty"` + IsDamaged *bool `json:"isDamaged,omitempty"` + Position *int64 `json:"position,omitempty"` +} + +func (o *DatabaseBackupInfo) GetBackupFinishDateAsTime() (*time.Time, error) { + if o.BackupFinishDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseBackupInfo) SetBackupFinishDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishDate = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_databasefileinfo.go b/resource-manager/datamigration/2025-06-30/post/model_databasefileinfo.go new file mode 100644 index 00000000000..9b97071588b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_databasefileinfo.go @@ -0,0 +1,14 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// DatabaseFileInfo describes a single database file.
type DatabaseFileInfo struct {
	DatabaseName     *string           `json:"databaseName,omitempty"`
	FileType         *DatabaseFileType `json:"fileType,omitempty"`
	Id               *string           `json:"id,omitempty"`
	LogicalName      *string           `json:"logicalName,omitempty"`
	PhysicalFullName *string           `json:"physicalFullName,omitempty"`
	RestoreFullName  *string           `json:"restoreFullName,omitempty"`
	SizeMB           *float64          `json:"sizeMB,omitempty"`
}

// DatabaseSummaryResult summarises per-database migration progress.
// StartedOn/EndedOn are RFC 3339 strings on the wire; use the helper methods
// below to convert to/from time.Time.
type DatabaseSummaryResult struct {
	EndedOn             *string         `json:"endedOn,omitempty"`
	ErrorPrefix         *string         `json:"errorPrefix,omitempty"`
	ItemsCompletedCount *int64          `json:"itemsCompletedCount,omitempty"`
	ItemsCount          *int64          `json:"itemsCount,omitempty"`
	Name                *string         `json:"name,omitempty"`
	ResultPrefix        *string         `json:"resultPrefix,omitempty"`
	SizeMB              *float64        `json:"sizeMB,omitempty"`
	StartedOn           *string         `json:"startedOn,omitempty"`
	State               *MigrationState `json:"state,omitempty"`
	StatusMessage       *string         `json:"statusMessage,omitempty"`
}

// GetEndedOnAsTime parses EndedOn as an RFC 3339 timestamp, returning
// (nil, nil) when the field is unset.
func (o *DatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) {
	if o.EndedOn == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00")
}

// SetEndedOnAsTime stores `input` into EndedOn as an RFC 3339 string.
func (o *DatabaseSummaryResult) SetEndedOnAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.EndedOn = &formatted
}

// GetStartedOnAsTime parses StartedOn as an RFC 3339 timestamp, returning
// (nil, nil) when the field is unset.
func (o *DatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) {
	if o.StartedOn == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00")
}

// SetStartedOnAsTime stores `input` into StartedOn as an RFC 3339 string.
func (o *DatabaseSummaryResult) SetStartedOnAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.StartedOn = &formatted
}

// DatabaseTable identifies a table and whether it contains rows.
type DatabaseTable struct {
	HasRows *bool   `json:"hasRows,omitempty"`
	Name    *string `json:"name,omitempty"`
}
// DataIntegrityValidationResult reports data-integrity validation failures.
type DataIntegrityValidationResult struct {
	FailedObjects    *map[string]string `json:"failedObjects,omitempty"`
	ValidationErrors *ValidationError   `json:"validationErrors,omitempty"`
}

// DataItemMigrationSummaryResult summarises migration progress for a single
// data item. StartedOn/EndedOn are RFC 3339 strings on the wire; use the
// helper methods below to convert to/from time.Time.
type DataItemMigrationSummaryResult struct {
	EndedOn             *string         `json:"endedOn,omitempty"`
	ErrorPrefix         *string         `json:"errorPrefix,omitempty"`
	ItemsCompletedCount *int64          `json:"itemsCompletedCount,omitempty"`
	ItemsCount          *int64          `json:"itemsCount,omitempty"`
	Name                *string         `json:"name,omitempty"`
	ResultPrefix        *string         `json:"resultPrefix,omitempty"`
	StartedOn           *string         `json:"startedOn,omitempty"`
	State               *MigrationState `json:"state,omitempty"`
	StatusMessage       *string         `json:"statusMessage,omitempty"`
}

// GetEndedOnAsTime parses EndedOn as an RFC 3339 timestamp, returning
// (nil, nil) when the field is unset.
func (o *DataItemMigrationSummaryResult) GetEndedOnAsTime() (*time.Time, error) {
	if o.EndedOn == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00")
}

// SetEndedOnAsTime stores `input` into EndedOn as an RFC 3339 string.
func (o *DataItemMigrationSummaryResult) SetEndedOnAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.EndedOn = &formatted
}

// GetStartedOnAsTime parses StartedOn as an RFC 3339 timestamp, returning
// (nil, nil) when the field is unset.
func (o *DataItemMigrationSummaryResult) GetStartedOnAsTime() (*time.Time, error) {
	if o.StartedOn == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00")
}

// SetStartedOnAsTime stores `input` into StartedOn as an RFC 3339 string.
func (o *DataItemMigrationSummaryResult) SetStartedOnAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.StartedOn = &formatted
}

// DataMigrationServiceStatusResponse reports the status of a Data Migration
// service instance. AgentConfiguration is schemaless on the wire, hence the
// *interface{} type.
type DataMigrationServiceStatusResponse struct {
	AgentConfiguration *interface{} `json:"agentConfiguration,omitempty"`
	AgentVersion       *string      `json:"agentVersion,omitempty"`
	Status             *string      `json:"status,omitempty"`
	SupportedTaskTypes *[]string    `json:"supportedTaskTypes,omitempty"`
	VMSize             *string      `json:"vmSize,omitempty"`
}
// ExecutionStatistics carries query execution statistics, including per-wait
// breakdowns keyed by wait type.
type ExecutionStatistics struct {
	CpuTimeMs      *float64                   `json:"cpuTimeMs,omitempty"`
	ElapsedTimeMs  *float64                   `json:"elapsedTimeMs,omitempty"`
	ExecutionCount *int64                     `json:"executionCount,omitempty"`
	HasErrors      *bool                      `json:"hasErrors,omitempty"`
	SqlErrors      *[]string                  `json:"sqlErrors,omitempty"`
	WaitStats      *map[string]WaitStatistics `json:"waitStats,omitempty"`
}

// FileShare describes a file share location and optional credentials;
// only `path` is required.
type FileShare struct {
	Password *string `json:"password,omitempty"`
	Path     string  `json:"path"`
	UserName *string `json:"userName,omitempty"`
}

// FileStorageInfo describes a storage URI and the HTTP headers to use with it.
type FileStorageInfo struct {
	Headers *map[string]string `json:"headers,omitempty"`
	Uri     *string            `json:"uri,omitempty"`
}

// GetTdeCertificatesSqlTaskInput is the input payload referenced by
// GetTdeCertificatesSqlTaskProperties; all fields are required.
type GetTdeCertificatesSqlTaskInput struct {
	BackupFileShare      FileShare                  `json:"backupFileShare"`
	ConnectionInfo       SqlConnectionInfo          `json:"connectionInfo"`
	SelectedCertificates []SelectedCertificateInput `json:"selectedCertificates"`
}
// GetTdeCertificatesSqlTaskOutput is the result entry produced by the task
// described by GetTdeCertificatesSqlTaskProperties; certificates are returned
// base64-encoded, keyed by name.
type GetTdeCertificatesSqlTaskOutput struct {
	Base64EncodedCertificates *map[string][]string   `json:"base64EncodedCertificates,omitempty"`
	ValidationErrors          *[]ReportableException `json:"validationErrors,omitempty"`
}

var _ ProjectTaskProperties = GetTdeCertificatesSqlTaskProperties{}

// GetTdeCertificatesSqlTaskProperties is the ProjectTaskProperties
// implementation serialized with the discriminator
// taskType == "GetTDECertificates.Sql" (see MarshalJSON).
type GetTdeCertificatesSqlTaskProperties struct {
	Input  *GetTdeCertificatesSqlTaskInput    `json:"input,omitempty"`
	Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the fields shared with the base
// ProjectTaskProperties type as a BaseProjectTaskPropertiesImpl.
func (s GetTdeCertificatesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = GetTdeCertificatesSqlTaskProperties{}

// MarshalJSON implements json.Marshaler, encoding the struct and then forcing
// the `taskType` discriminator so the payload always identifies this type.
func (s GetTdeCertificatesSqlTaskProperties) MarshalJSON() ([]byte, error) {
	// The local `wrapper` alias has the same layout but none of the methods,
	// so the inner json.Marshal does not recurse back into this MarshalJSON.
	type wrapper GetTdeCertificatesSqlTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling GetTdeCertificatesSqlTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties: %+v", err)
	}

	// Overwrite (or inject) the discriminator unconditionally.
	decoded["taskType"] = "GetTDECertificates.Sql"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling GetTdeCertificatesSqlTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &GetTdeCertificatesSqlTaskProperties{}

// UnmarshalJSON implements json.Unmarshaler. Concretely-typed fields are
// decoded directly; the polymorphic `commands` list is handled in a second
// pass via UnmarshalCommandPropertiesImplementation.
func (s *GetTdeCertificatesSqlTaskProperties) UnmarshalJSON(bytes []byte) error {
	// First pass: decode everything except the polymorphic `commands` field.
	var decoded struct {
		Input      *GetTdeCertificatesSqlTaskInput    `json:"input,omitempty"`
		Output     *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                 `json:"clientData,omitempty"`
		Errors     *[]ODataError                      `json:"errors,omitempty"`
		State      *TaskState                         `json:"state,omitempty"`
		TaskType   TaskType                           `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: re-read the raw JSON so each `commands` element can be
	// resolved to its concrete CommandProperties implementation.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetTdeCertificatesSqlTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}

// GetUserTablesMySqlTaskInput is the input payload referenced by
// GetUserTablesMySqlTaskProperties; both fields are required.
type GetUserTablesMySqlTaskInput struct {
	ConnectionInfo    MySqlConnectionInfo `json:"connectionInfo"`
	SelectedDatabases []string            `json:"selectedDatabases"`
}

// GetUserTablesMySqlTaskOutput is the result entry produced by the task
// described by GetUserTablesMySqlTaskProperties: tables grouped per database.
type GetUserTablesMySqlTaskOutput struct {
	DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"`
	Id                *string                     `json:"id,omitempty"`
	ValidationErrors  *[]ReportableException      `json:"validationErrors,omitempty"`
}
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = GetUserTablesMySqlTaskProperties{} + +type GetUserTablesMySqlTaskProperties struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesMySqlTaskProperties{} + +func (s GetUserTablesMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesMySqlTaskProperties{} + +func (s *GetUserTablesMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + 
ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_getusertablesoracletaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_getusertablesoracletaskinput.go new file mode 100644 index 00000000000..13ae64afb07 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_getusertablesoracletaskinput.go @@ -0,0 +1,9 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesOracleTaskInput struct { + ConnectionInfo OracleConnectionInfo `json:"connectionInfo"` + SelectedSchemas []string `json:"selectedSchemas"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_getusertablesoracletaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_getusertablesoracletaskoutput.go new file mode 100644 index 00000000000..38da278bee3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_getusertablesoracletaskoutput.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesOracleTaskOutput struct { + SchemaName *string `json:"schemaName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_getusertablesoracletaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_getusertablesoracletaskproperties.go new file mode 100644 index 00000000000..1c3be5a5204 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_getusertablesoracletaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesOracleTaskProperties{} + +type GetUserTablesOracleTaskProperties struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesOracleTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesOracleTaskProperties{} + +func (s GetUserTablesOracleTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesOracleTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesOracle" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesOracleTaskProperties{} + +func (s *GetUserTablesOracleTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError 
`json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesOracleTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_getusertablespostgresqltaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_getusertablespostgresqltaskinput.go new file mode 100644 index 00000000000..973bc8bba8a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_getusertablespostgresqltaskinput.go @@ -0,0 +1,9 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesPostgreSqlTaskInput struct { + ConnectionInfo PostgreSqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_getusertablespostgresqltaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_getusertablespostgresqltaskoutput.go new file mode 100644 index 00000000000..afcc78008d9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_getusertablespostgresqltaskoutput.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesPostgreSqlTaskOutput struct { + DatabaseName *string `json:"databaseName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_getusertablespostgresqltaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_getusertablespostgresqltaskproperties.go new file mode 100644 index 00000000000..8f92aa09962 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_getusertablespostgresqltaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesPostgreSqlTaskProperties{} + +type GetUserTablesPostgreSqlTaskProperties struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesPostgreSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesPostgreSqlTaskProperties{} + +func (s GetUserTablesPostgreSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesPostgreSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesPostgreSql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesPostgreSqlTaskProperties{} + +func (s *GetUserTablesPostgreSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string 
`json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesPostgreSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_getusertablessqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_getusertablessqlsynctaskinput.go new file mode 100644 index 00000000000..10830a59cb2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_getusertablessqlsynctaskinput.go @@ -0,0 +1,11 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlSyncTaskInput struct { + SelectedSourceDatabases []string `json:"selectedSourceDatabases"` + SelectedTargetDatabases []string `json:"selectedTargetDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_getusertablessqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_getusertablessqlsynctaskoutput.go new file mode 100644 index 00000000000..5451bb93a80 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_getusertablessqlsynctaskoutput.go @@ -0,0 +1,11 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlSyncTaskOutput struct { + DatabasesToSourceTables *map[string][]DatabaseTable `json:"databasesToSourceTables,omitempty"` + DatabasesToTargetTables *map[string][]DatabaseTable `json:"databasesToTargetTables,omitempty"` + TableValidationErrors *map[string][]string `json:"tableValidationErrors,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_getusertablessqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_getusertablessqlsynctaskproperties.go new file mode 100644 index 00000000000..09f4add77b2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_getusertablessqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlSyncTaskProperties{} + +type GetUserTablesSqlSyncTaskProperties struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlSyncTaskProperties{} + +func (s GetUserTablesSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlSyncTaskProperties{} + +func (s *GetUserTablesSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_getusertablessqltaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_getusertablessqltaskinput.go new file mode 100644 index 00000000000..4d4232bb916 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_getusertablessqltaskinput.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlTaskInput struct { + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_getusertablessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_getusertablessqltaskoutput.go new file mode 100644 index 00000000000..06b4321b13c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_getusertablessqltaskoutput.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_getusertablessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_getusertablessqltaskproperties.go new file mode 100644 index 00000000000..ad0118f0529 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_getusertablessqltaskproperties.go @@ -0,0 +1,109 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlTaskProperties{} + +type GetUserTablesSqlTaskProperties struct { + Input *GetUserTablesSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlTaskProperties{} + +func (s GetUserTablesSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.Sql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlTaskProperties{} + +func (s *GetUserTablesSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + 
Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemisynccompletecommandinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratemisynccompletecommandinput.go new file mode 100644 index 00000000000..d570aa2fe96 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemisynccompletecommandinput.go @@ -0,0 +1,8 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMISyncCompleteCommandInput struct { + SourceDatabaseName string `json:"sourceDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemisynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/post/model_migratemisynccompletecommandoutput.go new file mode 100644 index 00000000000..5f6869161d3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemisynccompletecommandoutput.go @@ -0,0 +1,8 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMISyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemisynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/post/model_migratemisynccompletecommandproperties.go new file mode 100644 index 00000000000..1d7303569e4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemisynccompletecommandproperties.go @@ -0,0 +1,55 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ CommandProperties = MigrateMISyncCompleteCommandProperties{} + +type MigrateMISyncCompleteCommandProperties struct { + Input *MigrateMISyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateMISyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateMISyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateMISyncCompleteCommandProperties{} + +func (s MigrateMISyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMISyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.SqlServer.AzureDbSqlMi.Complete" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_migratemongodbtaskproperties.go new file mode 100644 index 00000000000..05936920d44 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemongodbtaskproperties.go @@ -0,0 +1,121 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMongoDbTaskProperties{} + +type MigrateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMongoDbTaskProperties{} + +func (s MigrateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMongoDbTaskProperties{} + +func (s *MigrateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + 
TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MongoDbProgress, 0) + for i, val := range listTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinedatabaseinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinedatabaseinput.go new file mode 100644 index 00000000000..f42c26e3a9f --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinedatabaseinput.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlOfflineDatabaseInput struct { + Name *string `json:"name,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskinput.go new file mode 100644 index 00000000000..16d9dd69872 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskinput.go @@ -0,0 +1,32 @@ +package post + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + MakeSourceServerReadOnly *bool `json:"makeSourceServerReadOnly,omitempty"` + OptionalAgentSettings *map[string]string `json:"optionalAgentSettings,omitempty"` + SelectedDatabases []MigrateMySqlAzureDbForMySqlOfflineDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutput.go new file mode 100644 index 00000000000..4726607d35a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutput.go @@ -0,0 +1,100 @@ +package post + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskOutput interface { + MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlOfflineTaskOutput BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlOfflineTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlOfflineTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + 
if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + migrateMySqlAzureDbForMySqlOfflineTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go new file mode 100644 index 00000000000..9d781c2979e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + var decoded 
map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go new file mode 100644 index 00000000000..690b76772b4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go @@ -0,0 +1,52 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go new file mode 100644 index 00000000000..16f6ad8a7cd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package post + +import ( + 
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MarshalJSON() ([]byte, 
error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go new file mode 100644 index 00000000000..f90f8cab1e5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go @@ -0,0 +1,61 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling 
MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskproperties.go new file mode 100644 index 00000000000..3ae6d70af11 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlofflinetaskproperties.go @@ -0,0 +1,127 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlOfflineTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskProperties + wrapped := wrapper(s) + encoded, err 
:= json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlOfflineTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range 
listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlOfflineTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsyncdatabaseinput.go new file mode 100644 index 00000000000..17926a7a3a4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsyncdatabaseinput.go @@ -0,0 +1,13 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlSyncDatabaseInput struct { + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskinput.go new file mode 100644 index 00000000000..7363548a55a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskinput.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlSyncTaskInput struct { + SelectedDatabases []MigrateMySqlAzureDbForMySqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutput.go new file mode 100644 index 00000000000..f7163c7211f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package post + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlSyncTaskOutput interface { + MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlSyncTaskOutput BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlSyncTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err 
!= nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + migrateMySqlAzureDbForMySqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..9cd34907694 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..2d2c75af116 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + +func (s 
MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputerror.go new file mode 100644 index 00000000000..ae21f4a44df --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..3c72d2fd90e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..b393ed0bfd9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *string `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *string `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *string `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, 
+ ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskproperties.go new file mode 100644 index 00000000000..996f79982c3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratemysqlazuredbformysqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +type MigrateMySqlAzureDbForMySqlSyncTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput 
`json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..d7833fb886c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type MigrateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded 
map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, 
ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateOracleAzureDbPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..30fb8b7f17a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateOracleAzureDbPostgreSqlSyncDatabaseInput struct { + CaseManipulation *string `json:"caseManipulation,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..363ec8f7339 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskinput.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateOracleAzureDbPostgreSqlSyncTaskInput struct { + SelectedDatabases []MigrateOracleAzureDbPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..b2653f50605 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package post + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutput interface { + MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +type BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +// RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + migrateOracleAzureDbPostgreSqlSyncTaskOutput BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s.migrateOracleAzureDbPostgreSqlSyncTaskOutput +} + +func UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(input []byte) (MigrateOracleAzureDbPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + migrateOracleAzureDbPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..8c97caa4c13 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..75cc0b8f556 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..f274f00c664 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..4d46547bfa9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ +package post + +import ( + 
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..771b82d931b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) 
MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..f5f55182e2f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go @@ -0,0 +1,14 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]interface{} `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SelectedTables *[]MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput `json:"selectedTables,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go new file mode 100644 index 00000000000..4e976f5245c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go @@ -0,0 +1,8 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput struct { + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..e675da3e545 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,30 @@ +package post + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..89ab754fc6b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package post + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput interface { + MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +type BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +// RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s.migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput +} + +func UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(input []byte) (MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if 
strings.EqualFold(value, "MigrationLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..40ad7205a29 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..16e50d5436a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) 
MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..7b712e85b72 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go @@ -0,0 +1,53 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..a1aeb378845 --- 
/dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel struct { + DatabaseCount *float64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerType *ScenarioSource `json:"sourceServerType,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *ReplicateMigrationState `json:"state,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerType *ScenarioTarget `json:"targetServerType,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..3ce6cb83710 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..e16e6d12baf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,130 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, 
nil +} + +var _ json.Unmarshaler = &MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list 
[]json.RawMessage: %+v", err) + } + + output := make([]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbdatabaseinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbdatabaseinput.go new file mode 100644 index 00000000000..0140d3b5aa9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbdatabaseinput.go @@ -0,0 +1,13 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbDatabaseInput struct { + Id *string `json:"id,omitempty"` + MakeSourceDbReadOnly *bool `json:"makeSourceDbReadOnly,omitempty"` + Name *string `json:"name,omitempty"` + SchemaSetting *interface{} `json:"schemaSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsyncdatabaseinput.go new file mode 100644 index 00000000000..b5031be5cb4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskinput.go new file mode 100644 index 00000000000..7b694b09106 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskinput.go @@ -0,0 +1,11 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutput.go new file mode 100644 index 00000000000..9eadd0837a2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutput.go @@ -0,0 +1,108 @@ +package post + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncTaskOutput interface { + MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = RawMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + migrateSqlServerSqlDbSyncTaskOutput BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s.migrateSqlServerSqlDbSyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbSyncTaskOutputImpl{ + migrateSqlServerSqlDbSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..e01056b69f4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..9e48a0f7329 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package post + +import ( + 
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err 
!= nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputerror.go new file mode 100644 index 00000000000..fb285d9874e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputError{} + +type MigrateSqlServerSqlDbSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..f7a1f5b625e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go @@ -0,0 +1,58 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputtablelevel.go 
b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..1821ee69b19 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) 
MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..787fb37cb00 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbsynctaskproperties.go @@ -0,0 +1,121 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateSqlServerSqlDbSyncTaskProperties{} + +type MigrateSqlServerSqlDbSyncTaskProperties struct { + Input *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlDbSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskProperties{} + +func (s MigrateSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbSyncTaskProperties{} + +func (s *MigrateSqlServerSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbtaskinput.go new file mode 100644 index 
00000000000..c2197bcfc0c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbtaskinput.go @@ -0,0 +1,13 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigrateSqlServerSqlDbDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbtaskoutput.go new file mode 100644 index 00000000000..deb1a8ffc9b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbtaskoutput.go @@ -0,0 +1,116 @@ +package post + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbTaskOutput interface { + MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbTaskOutput = BaseMigrateSqlServerSqlDbTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbTaskOutput = RawMigrateSqlServerSqlDbTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlDbTaskOutputImpl struct { + migrateSqlServerSqlDbTaskOutput BaseMigrateSqlServerSqlDbTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s.migrateSqlServerSqlDbTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "MigrationDatabaseLevelValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbTaskOutputImpl{ + migrateSqlServerSqlDbTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbtaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbtaskoutputdatabaselevel.go new file mode 100644 index 00000000000..8c19410807b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbtaskoutputdatabaselevel.go @@ -0,0 +1,65 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevel) 
MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go new file mode 100644 index 00000000000..45d1434cabb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go @@ -0,0 +1,60 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// Compile-time assertion that this type satisfies the
// MigrateSqlServerSqlDbTaskOutput discriminated interface.
var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{}

// MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult is the variant
// of MigrateSqlServerSqlDbTaskOutput whose "resultType" discriminator is
// "MigrationDatabaseLevelValidationOutput".
type MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult struct {
	DataIntegrityValidationResult *DataIntegrityValidationResult    `json:"dataIntegrityValidationResult,omitempty"`
	EndedOn                       *string                           `json:"endedOn,omitempty"`
	MigrationId                   *string                           `json:"migrationId,omitempty"`
	QueryAnalysisValidationResult *QueryAnalysisValidationResult    `json:"queryAnalysisValidationResult,omitempty"`
	SchemaValidationResult        *SchemaComparisonValidationResult `json:"schemaValidationResult,omitempty"`
	SourceDatabaseName            *string                           `json:"sourceDatabaseName,omitempty"`
	StartedOn                     *string                           `json:"startedOn,omitempty"`
	Status                        *ValidationStatus                 `json:"status,omitempty"`
	TargetDatabaseName            *string                           `json:"targetDatabaseName,omitempty"`

	// Fields inherited from MigrateSqlServerSqlDbTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbTaskOutput returns only the fields shared with the
// parent type, implementing the discriminated interface.
func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl {
	return BaseMigrateSqlServerSqlDbTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{}

// MarshalJSON serializes the struct, then forces the "resultType"
// discriminator to "MigrationDatabaseLevelValidationOutput".
func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MarshalJSON() ([]byte, error) {
	// wrapper drops this MarshalJSON method to avoid infinite recursion.
	type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err)
	}

	// Inject the discriminator value for this concrete type.
	decoded["resultType"] = "MigrationDatabaseLevelValidationOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqldbtaskoutputerror.go ----

package post

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// Compile-time assertion that this type satisfies the
// MigrateSqlServerSqlDbTaskOutput discriminated interface.
var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputError{}

// MigrateSqlServerSqlDbTaskOutputError is the variant of
// MigrateSqlServerSqlDbTaskOutput whose "resultType" discriminator is
// "ErrorOutput"; it carries a single ReportableException.
type MigrateSqlServerSqlDbTaskOutputError struct {
	Error *ReportableException `json:"error,omitempty"`

	// Fields inherited from MigrateSqlServerSqlDbTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbTaskOutput returns only the fields shared with the
// parent type, implementing the discriminated interface.
func (s MigrateSqlServerSqlDbTaskOutputError) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl {
	return BaseMigrateSqlServerSqlDbTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputError{}

// MarshalJSON serializes the struct, then forces the "resultType"
// discriminator to "ErrorOutput".
func (s MigrateSqlServerSqlDbTaskOutputError) MarshalJSON() ([]byte, error) {
	// wrapper drops this MarshalJSON method to avoid infinite recursion.
	type wrapper MigrateSqlServerSqlDbTaskOutputError
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err)
	}

	// Inject the discriminator value for this concrete type.
	decoded["resultType"] = "ErrorOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqldbtaskoutputmigrationlevel.go ----

package post

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// Compile-time assertion that this type satisfies the
// MigrateSqlServerSqlDbTaskOutput discriminated interface.
var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputMigrationLevel{}

// MigrateSqlServerSqlDbTaskOutputMigrationLevel is the variant of
// MigrateSqlServerSqlDbTaskOutput whose "resultType" discriminator is
// "MigrationLevelOutput".
type MigrateSqlServerSqlDbTaskOutputMigrationLevel struct {
	DatabaseSummary           *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"`
	Databases                 *map[string]string                `json:"databases,omitempty"`
	DurationInSeconds         *int64                            `json:"durationInSeconds,omitempty"`
	EndedOn                   *string                           `json:"endedOn,omitempty"`
	ExceptionsAndWarnings     *[]ReportableException            `json:"exceptionsAndWarnings,omitempty"`
	Message                   *string                           `json:"message,omitempty"`
	MigrationReportResult     *MigrationReportResult            `json:"migrationReportResult,omitempty"`
	MigrationValidationResult *MigrationValidationResult        `json:"migrationValidationResult,omitempty"`
	SourceServerBrandVersion  *string                           `json:"sourceServerBrandVersion,omitempty"`
	SourceServerVersion       *string                           `json:"sourceServerVersion,omitempty"`
	StartedOn                 *string                           `json:"startedOn,omitempty"`
	Status                    *MigrationStatus                  `json:"status,omitempty"`
	StatusMessage             *string                           `json:"statusMessage,omitempty"`
	TargetServerBrandVersion  *string                           `json:"targetServerBrandVersion,omitempty"`
	TargetServerVersion       *string                           `json:"targetServerVersion,omitempty"`

	// Fields inherited from MigrateSqlServerSqlDbTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbTaskOutput returns only the fields shared with the
// parent type, implementing the discriminated interface.
func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl {
	return BaseMigrateSqlServerSqlDbTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputMigrationLevel{}

// MarshalJSON serializes the struct, then forces the "resultType"
// discriminator to "MigrationLevelOutput".
func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) {
	// wrapper drops this MarshalJSON method to avoid infinite recursion.
	type wrapper MigrateSqlServerSqlDbTaskOutputMigrationLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err)
	}

	// Inject the discriminator value for this concrete type.
	decoded["resultType"] = "MigrationLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqldbtaskoutputtablelevel.go (header; type continues in the next hunk) ----

package post

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqldbtaskoutputvalidationresult.go 
// ---- file: model_migratesqlserversqldbtaskoutputvalidationresult.go ----

package post

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// Compile-time assertion that this type satisfies the
// MigrateSqlServerSqlDbTaskOutput discriminated interface.
var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputValidationResult{}

// MigrateSqlServerSqlDbTaskOutputValidationResult is the variant of
// MigrateSqlServerSqlDbTaskOutput whose "resultType" discriminator is
// "MigrationValidationOutput".
type MigrateSqlServerSqlDbTaskOutputValidationResult struct {
	MigrationId    *string                                             `json:"migrationId,omitempty"`
	Status         *ValidationStatus                                   `json:"status,omitempty"`
	SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"`

	// Fields inherited from MigrateSqlServerSqlDbTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlDbTaskOutput returns only the fields shared with the
// parent type, implementing the discriminated interface.
func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl {
	return BaseMigrateSqlServerSqlDbTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputValidationResult{}

// MarshalJSON serializes the struct, then forces the "resultType"
// discriminator to "MigrationValidationOutput".
func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MarshalJSON() ([]byte, error) {
	// wrapper drops this MarshalJSON method to avoid infinite recursion.
	type wrapper MigrateSqlServerSqlDbTaskOutputValidationResult
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err)
	}

	// Inject the discriminator value for this concrete type.
	decoded["resultType"] = "MigrationValidationOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqldbtaskproperties.go ----

package post

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// Compile-time assertion that this type satisfies the ProjectTaskProperties
// discriminated interface.
var _ ProjectTaskProperties = MigrateSqlServerSqlDbTaskProperties{}

// MigrateSqlServerSqlDbTaskProperties is the variant of ProjectTaskProperties
// whose "taskType" discriminator is "Migrate.SqlServer.SqlDb". Output holds
// polymorphic MigrateSqlServerSqlDbTaskOutput values, which is why this type
// carries a custom UnmarshalJSON below.
type MigrateSqlServerSqlDbTaskProperties struct {
	CreatedOn   *string                            `json:"createdOn,omitempty"`
	Input       *MigrateSqlServerSqlDbTaskInput    `json:"input,omitempty"`
	IsCloneable *bool                              `json:"isCloneable,omitempty"`
	Output      *[]MigrateSqlServerSqlDbTaskOutput `json:"output,omitempty"`
	TaskId      *string                            `json:"taskId,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns only the fields shared with the parent type,
// implementing the discriminated interface.
func (s MigrateSqlServerSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbTaskProperties{}

// MarshalJSON serializes the struct, then forces the "taskType" discriminator
// to "Migrate.SqlServer.SqlDb".
func (s MigrateSqlServerSqlDbTaskProperties) MarshalJSON() ([]byte, error) {
	// wrapper drops this MarshalJSON method to avoid infinite recursion.
	type wrapper MigrateSqlServerSqlDbTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties: %+v", err)
	}

	// Inject the discriminator value for this concrete type.
	decoded["taskType"] = "Migrate.SqlServer.SqlDb"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MigrateSqlServerSqlDbTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then decodes the
// polymorphic "commands" and "output" lists element-by-element via the
// corresponding Unmarshal…Implementation helpers so each element resolves to
// its concrete discriminated type.
func (s *MigrateSqlServerSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error {
	// First pass: all non-polymorphic fields.
	var decoded struct {
		CreatedOn   *string                         `json:"createdOn,omitempty"`
		Input       *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"`
		IsCloneable *bool                           `json:"isCloneable,omitempty"`
		TaskId      *string                         `json:"taskId,omitempty"`
		ClientData  *map[string]string              `json:"clientData,omitempty"`
		Errors      *[]ODataError                   `json:"errors,omitempty"`
		State       *TaskState                      `json:"state,omitempty"`
		TaskType    TaskType                        `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.CreatedOn = decoded.CreatedOn
	s.Input = decoded.Input
	s.IsCloneable = decoded.IsCloneable
	s.TaskId = decoded.TaskId
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: keep the polymorphic fields as raw JSON for per-element
	// discriminator-based decoding.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]MigrateSqlServerSqlDbTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}

// ---- file: model_migratesqlserversqlmidatabaseinput.go (header; type continues in the next hunk) ----

package post

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +type MigrateSqlServerSqlMIDatabaseInput struct { + BackupFilePaths *[]string `json:"backupFilePaths,omitempty"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + Id *string `json:"id,omitempty"` + Name string `json:"name"` + RestoreDatabaseName string `json:"restoreDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..cb0c43cc609 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmisynctaskinput.go @@ -0,0 +1,14 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + NumberOfParallelDatabaseMigrations *float64 `json:"numberOfParallelDatabaseMigrations,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..f151b315d47 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmisynctaskoutput.go @@ -0,0 +1,92 @@ +package post + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
// MigrateSqlServerSqlMISyncTaskOutput is the discriminated interface for the
// SQL MI (sync) task output; concrete variants are selected by the
// "resultType" JSON field (see the Unmarshal… helper below).
type MigrateSqlServerSqlMISyncTaskOutput interface {
	MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl
}

var _ MigrateSqlServerSqlMISyncTaskOutput = BaseMigrateSqlServerSqlMISyncTaskOutputImpl{}

// BaseMigrateSqlServerSqlMISyncTaskOutputImpl holds the fields common to every
// variant of MigrateSqlServerSqlMISyncTaskOutput.
type BaseMigrateSqlServerSqlMISyncTaskOutputImpl struct {
	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

func (s BaseMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl {
	return s
}

var _ MigrateSqlServerSqlMISyncTaskOutput = RawMigrateSqlServerSqlMISyncTaskOutputImpl{}

// RawMigrateSqlServerSqlMISyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawMigrateSqlServerSqlMISyncTaskOutputImpl struct {
	migrateSqlServerSqlMISyncTaskOutput BaseMigrateSqlServerSqlMISyncTaskOutputImpl
	Type                                string
	Values                              map[string]interface{}
}

func (s RawMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl {
	return s.migrateSqlServerSqlMISyncTaskOutput
}

// UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation resolves the
// "resultType" discriminator (case-insensitively) to the matching concrete
// type; unknown values fall back to RawMigrateSqlServerSqlMISyncTaskOutputImpl
// so no data is lost.
func UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlMISyncTaskOutput, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutput into map[string]interface: %+v", err)
	}

	// Read the discriminator; missing key leaves value empty and falls through
	// to the Raw implementation below.
	var value string
	if v, ok := temp["resultType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "DatabaseLevelOutput") {
		var out MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ErrorOutput") {
		var out MigrateSqlServerSqlMISyncTaskOutputError
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputError: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "MigrationLevelOutput") {
		var out MigrateSqlServerSqlMISyncTaskOutputMigrationLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err)
		}
		return out, nil
	}

	var parent BaseMigrateSqlServerSqlMISyncTaskOutputImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMISyncTaskOutputImpl: %+v", err)
	}

	return RawMigrateSqlServerSqlMISyncTaskOutputImpl{
		migrateSqlServerSqlMISyncTaskOutput: parent,
		Type:                                value,
		Values:                              temp,
	}, nil

}

// ---- file: model_migratesqlserversqlmisynctaskoutputdatabaselevel.go ----

package post

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// Compile-time assertion that this type satisfies the
// MigrateSqlServerSqlMISyncTaskOutput discriminated interface.
var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{}

// MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel is the variant of
// MigrateSqlServerSqlMISyncTaskOutput whose "resultType" discriminator is
// "DatabaseLevelOutput".
type MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel struct {
	ActiveBackupSets          *[]BackupSetInfo        `json:"activeBackupSets,omitempty"`
	ContainerName             *string                 `json:"containerName,omitempty"`
	EndedOn                   *string                 `json:"endedOn,omitempty"`
	ErrorPrefix               *string                 `json:"errorPrefix,omitempty"`
	ExceptionsAndWarnings     *[]ReportableException  `json:"exceptionsAndWarnings,omitempty"`
	FullBackupSetInfo         *BackupSetInfo          `json:"fullBackupSetInfo,omitempty"`
	IsFullBackupRestored      *bool                   `json:"isFullBackupRestored,omitempty"`
	LastRestoredBackupSetInfo *BackupSetInfo          `json:"lastRestoredBackupSetInfo,omitempty"`
	MigrationState            *DatabaseMigrationState `json:"migrationState,omitempty"`
	SourceDatabaseName        *string                 `json:"sourceDatabaseName,omitempty"`
	StartedOn                 *string                 `json:"startedOn,omitempty"`

	// Fields inherited from MigrateSqlServerSqlMISyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlMISyncTaskOutput returns only the fields shared with the
// parent type, implementing the discriminated interface.
func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl {
	return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{}

// MarshalJSON serializes the struct, then forces the "resultType"
// discriminator to "DatabaseLevelOutput".
func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) {
	// wrapper drops this MarshalJSON method to avoid infinite recursion.
	type wrapper MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err)
	}

	// Inject the discriminator value for this concrete type.
	decoded["resultType"] = "DatabaseLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqlmisynctaskoutputerror.go (header; type continues in the next hunk) ----

package post

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputError{} + +type MigrateSqlServerSqlMISyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputError{} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..b1333f05f3c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go @@ -0,0 +1,62 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
// Compile-time assertion that this type satisfies the
// MigrateSqlServerSqlMISyncTaskOutput discriminated interface.
var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{}

// MigrateSqlServerSqlMISyncTaskOutputMigrationLevel is the variant of
// MigrateSqlServerSqlMISyncTaskOutput whose "resultType" discriminator is
// "MigrationLevelOutput".
type MigrateSqlServerSqlMISyncTaskOutputMigrationLevel struct {
	DatabaseCount            *int64          `json:"databaseCount,omitempty"`
	DatabaseErrorCount       *int64          `json:"databaseErrorCount,omitempty"`
	EndedOn                  *string         `json:"endedOn,omitempty"`
	SourceServerBrandVersion *string         `json:"sourceServerBrandVersion,omitempty"`
	SourceServerName         *string         `json:"sourceServerName,omitempty"`
	SourceServerVersion      *string         `json:"sourceServerVersion,omitempty"`
	StartedOn                *string         `json:"startedOn,omitempty"`
	State                    *MigrationState `json:"state,omitempty"`
	TargetServerBrandVersion *string         `json:"targetServerBrandVersion,omitempty"`
	TargetServerName         *string         `json:"targetServerName,omitempty"`
	TargetServerVersion      *string         `json:"targetServerVersion,omitempty"`

	// Fields inherited from MigrateSqlServerSqlMISyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlMISyncTaskOutput returns only the fields shared with the
// parent type, implementing the discriminated interface.
func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl {
	return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{}

// MarshalJSON serializes the struct, then forces the "resultType"
// discriminator to "MigrationLevelOutput".
func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) {
	// wrapper drops this MarshalJSON method to avoid infinite recursion.
	type wrapper MigrateSqlServerSqlMISyncTaskOutputMigrationLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err)
	}

	// Inject the discriminator value for this concrete type.
	decoded["resultType"] = "MigrationLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err)
	}

	return encoded, nil
}

// ---- file: model_migratesqlserversqlmisynctaskproperties.go ----

package post

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// Compile-time assertion that this type satisfies the ProjectTaskProperties
// discriminated interface.
var _ ProjectTaskProperties = MigrateSqlServerSqlMISyncTaskProperties{}

// MigrateSqlServerSqlMISyncTaskProperties is the variant of
// ProjectTaskProperties whose "taskType" discriminator is
// "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS". Output holds polymorphic
// MigrateSqlServerSqlMISyncTaskOutput values, hence the custom UnmarshalJSON.
type MigrateSqlServerSqlMISyncTaskProperties struct {
	CreatedOn *string                                `json:"createdOn,omitempty"`
	Input     *MigrateSqlServerSqlMISyncTaskInput    `json:"input,omitempty"`
	Output    *[]MigrateSqlServerSqlMISyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns only the fields shared with the parent type,
// implementing the discriminated interface.
func (s MigrateSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskProperties{}

// MarshalJSON serializes the struct, then forces the "taskType" discriminator
// to "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS".
func (s MigrateSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) {
	// wrapper drops this MarshalJSON method to avoid infinite recursion.
	type wrapper MigrateSqlServerSqlMISyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err)
	}

	// Inject the discriminator value for this concrete type.
	decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MigrateSqlServerSqlMISyncTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then decodes the
// polymorphic "commands" and "output" lists element-by-element via the
// corresponding Unmarshal…Implementation helpers so each element resolves to
// its concrete discriminated type.
func (s *MigrateSqlServerSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	// First pass: all non-polymorphic fields.
	var decoded struct {
		CreatedOn  *string                             `json:"createdOn,omitempty"`
		Input      *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"`
		ClientData *map[string]string                  `json:"clientData,omitempty"`
		Errors     *[]ODataError                       `json:"errors,omitempty"`
		State      *TaskState                          `json:"state,omitempty"`
		TaskType   TaskType                            `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.CreatedOn = decoded.CreatedOn
	s.Input = decoded.Input
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: keep the polymorphic fields as raw JSON for per-element
	// discriminator-based decoding.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]MigrateSqlServerSqlMISyncTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}

// ---- file: model_migratesqlserversqlmitaskinput.go (header only; type is outside this chunk) ----

package post

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +type MigrateSqlServerSqlMITaskInput struct { + AadDomainName *string `json:"aadDomainName,omitempty"` + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedAgentJobs *[]string `json:"selectedAgentJobs,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..372bca38be3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutput.go @@ -0,0 +1,108 @@ +package post + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMITaskOutput interface { + MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl +} + +var _ MigrateSqlServerSqlMITaskOutput = BaseMigrateSqlServerSqlMITaskOutputImpl{} + +type BaseMigrateSqlServerSqlMITaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMITaskOutput = RawMigrateSqlServerSqlMITaskOutputImpl{} + +// RawMigrateSqlServerSqlMITaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlMITaskOutputImpl struct { + migrateSqlServerSqlMITaskOutput BaseMigrateSqlServerSqlMITaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s.migrateSqlServerSqlMITaskOutput +} + +func UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(input []byte) (MigrateSqlServerSqlMITaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMITaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMITaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMITaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlMITaskOutputImpl{ + migrateSqlServerSqlMITaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputagentjoblevel.go new file mode 100644 index 00000000000..1b66f212f68 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +type MigrateSqlServerSqlMITaskOutputAgentJobLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + Message *string `json:"message,omitempty"` + Name *string `json:"name,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputdatabaselevel.go new file mode 100644 index 00000000000..450dfcf13e4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputdatabaselevel.go @@ -0,0 +1,59 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMITaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, 
fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputerror.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputerror.go new file mode 100644 index 00000000000..e3e74bb6a66 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputerror.go @@ -0,0 +1,52 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputError{} + +type MigrateSqlServerSqlMITaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputError) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputError{} + +func (s MigrateSqlServerSqlMITaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling 
MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputloginlevel.go new file mode 100644 index 00000000000..85d4ecfe6cd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputloginlevel.go @@ -0,0 +1,58 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +type MigrateSqlServerSqlMITaskOutputLoginLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LoginName *string `json:"loginName,omitempty"` + Message *string `json:"message,omitempty"` + Stage *LoginMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputLoginLevel + 
wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputmigrationlevel.go new file mode 100644 index 00000000000..9e76b1ee13c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMITaskOutputMigrationLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + Message *string `json:"message,omitempty"` + OrphanedUsersInfo *[]OrphanedUserInfo `json:"orphanedUsersInfo,omitempty"` + ServerRoleResults *map[string]StartMigrationScenarioServerRoleResult `json:"serverRoleResults,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); 
err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..68d9cfbd3d5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesqlserversqlmitaskproperties.go @@ -0,0 +1,133 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlMITaskProperties{} + +type MigrateSqlServerSqlMITaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlMITaskOutput `json:"output,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ 
json.Marshaler = MigrateSqlServerSqlMITaskProperties{} + +func (s MigrateSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMITaskProperties{} + +func (s *MigrateSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.ParentTaskId = decoded.ParentTaskId + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties into 
map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMITaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratessistaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratessistaskinput.go new file mode 100644 index 00000000000..3094cc70b9f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratessistaskinput.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSsisTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + SsisMigrationInfo SsisMigrationInfo `json:"ssisMigrationInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratessistaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_migratessistaskoutput.go new file mode 100644 index 00000000000..1d1e41b1132 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratessistaskoutput.go @@ -0,0 +1,84 @@ +package post + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSsisTaskOutput interface { + MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl +} + +var _ MigrateSsisTaskOutput = BaseMigrateSsisTaskOutputImpl{} + +type BaseMigrateSsisTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return s +} + +var _ MigrateSsisTaskOutput = RawMigrateSsisTaskOutputImpl{} + +// RawMigrateSsisTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSsisTaskOutputImpl struct { + migrateSsisTaskOutput BaseMigrateSsisTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return s.migrateSsisTaskOutput +} + +func UnmarshalMigrateSsisTaskOutputImplementation(input []byte) (MigrateSsisTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSsisTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SsisProjectLevelOutput") { + var out MigrateSsisTaskOutputProjectLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputProjectLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSsisTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSsisTaskOutputImpl: %+v", err) + } + + return RawMigrateSsisTaskOutputImpl{ + migrateSsisTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratessistaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratessistaskoutputmigrationlevel.go new file mode 100644 index 00000000000..1540868ab39 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratessistaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package post + +import ( + "encoding/json" + 
"fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputMigrationLevel{} + +type MigrateSsisTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputMigrationLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputMigrationLevel{} + +func (s MigrateSsisTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } 
+ + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratessistaskoutputprojectlevel.go b/resource-manager/datamigration/2025-06-30/post/model_migratessistaskoutputprojectlevel.go new file mode 100644 index 00000000000..475ed339196 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratessistaskoutputprojectlevel.go @@ -0,0 +1,59 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputProjectLevel{} + +type MigrateSsisTaskOutputProjectLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FolderName *string `json:"folderName,omitempty"` + Message *string `json:"message,omitempty"` + ProjectName *string `json:"projectName,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputProjectLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputProjectLevel{} + +func (s MigrateSsisTaskOutputProjectLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputProjectLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling 
MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + decoded["resultType"] = "SsisProjectLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratessistaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_migratessistaskproperties.go new file mode 100644 index 00000000000..7eccc1c90ad --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratessistaskproperties.go @@ -0,0 +1,121 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSsisTaskProperties{} + +type MigrateSsisTaskProperties struct { + Input *MigrateSsisTaskInput `json:"input,omitempty"` + Output *[]MigrateSsisTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSsisTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSsisTaskProperties{} + +func (s MigrateSsisTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if 
err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Ssis" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSsisTaskProperties{} + +func (s *MigrateSsisTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateSsisTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSsisTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := 
make([]MigrateSsisTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSsisTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesynccompletecommandinput.go b/resource-manager/datamigration/2025-06-30/post/model_migratesynccompletecommandinput.go new file mode 100644 index 00000000000..2b156b144e4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesynccompletecommandinput.go @@ -0,0 +1,27 @@ +package post + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSyncCompleteCommandInput struct { + CommitTimeStamp *string `json:"commitTimeStamp,omitempty"` + DatabaseName string `json:"databaseName"` +} + +func (o *MigrateSyncCompleteCommandInput) GetCommitTimeStampAsTime() (*time.Time, error) { + if o.CommitTimeStamp == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CommitTimeStamp, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateSyncCompleteCommandInput) SetCommitTimeStampAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CommitTimeStamp = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/post/model_migratesynccompletecommandoutput.go new file mode 100644 index 00000000000..169748d6fa7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesynccompletecommandoutput.go @@ -0,0 +1,9 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` + Id *string `json:"id,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migratesynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/post/model_migratesynccompletecommandproperties.go new file mode 100644 index 00000000000..1be496452d4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migratesynccompletecommandproperties.go @@ -0,0 +1,56 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ CommandProperties = MigrateSyncCompleteCommandProperties{} + +type MigrateSyncCompleteCommandProperties struct { + CommandId *string `json:"commandId,omitempty"` + Input *MigrateSyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateSyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateSyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateSyncCompleteCommandProperties{} + +func (s MigrateSyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, 
&decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.Sync.Complete.Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migrationeligibilityinfo.go b/resource-manager/datamigration/2025-06-30/post/model_migrationeligibilityinfo.go new file mode 100644 index 00000000000..dfe26ee06ac --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migrationeligibilityinfo.go @@ -0,0 +1,9 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationEligibilityInfo struct { + IsEligibleForMigration *bool `json:"isEligibleForMigration,omitempty"` + ValidationMessages *[]string `json:"validationMessages,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migrationreportresult.go b/resource-manager/datamigration/2025-06-30/post/model_migrationreportresult.go new file mode 100644 index 00000000000..a7237f4ca1c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migrationreportresult.go @@ -0,0 +1,9 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationReportResult struct { + Id *string `json:"id,omitempty"` + ReportURL *string `json:"reportUrl,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migrationvalidationdatabasesummaryresult.go b/resource-manager/datamigration/2025-06-30/post/model_migrationvalidationdatabasesummaryresult.go new file mode 100644 index 00000000000..3f459a704af --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migrationvalidationdatabasesummaryresult.go @@ -0,0 +1,44 @@ +package post + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationValidationDatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} + +func (o *MigrationValidationDatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *MigrationValidationDatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff 
--git a/resource-manager/datamigration/2025-06-30/post/model_migrationvalidationoptions.go b/resource-manager/datamigration/2025-06-30/post/model_migrationvalidationoptions.go new file mode 100644 index 00000000000..01cefc0624d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migrationvalidationoptions.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationValidationOptions struct { + EnableDataIntegrityValidation *bool `json:"enableDataIntegrityValidation,omitempty"` + EnableQueryAnalysisValidation *bool `json:"enableQueryAnalysisValidation,omitempty"` + EnableSchemaValidation *bool `json:"enableSchemaValidation,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_migrationvalidationresult.go b/resource-manager/datamigration/2025-06-30/post/model_migrationvalidationresult.go new file mode 100644 index 00000000000..bed85490171 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_migrationvalidationresult.go @@ -0,0 +1,11 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationValidationResult struct { + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_misqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/post/model_misqlconnectioninfo.go new file mode 100644 index 00000000000..e299612d26f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_misqlconnectioninfo.go @@ -0,0 +1,54 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MiSqlConnectionInfo{} + +type MiSqlConnectionInfo struct { + ManagedInstanceResourceId string `json:"managedInstanceResourceId"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MiSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MiSqlConnectionInfo{} + +func (s MiSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MiSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MiSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MiSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MiSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MiSqlConnectionInfo: %+v", err) + } + + 
return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_mongodbclusterinfo.go b/resource-manager/datamigration/2025-06-30/post/model_mongodbclusterinfo.go new file mode 100644 index 00000000000..9eaf865a4ab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_mongodbclusterinfo.go @@ -0,0 +1,11 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbClusterInfo struct { + Databases []MongoDbDatabaseInfo `json:"databases"` + SupportsSharding bool `json:"supportsSharding"` + Type MongoDbClusterType `json:"type"` + Version string `json:"version"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_mongodbcollectioninfo.go b/resource-manager/datamigration/2025-06-30/post/model_mongodbcollectioninfo.go new file mode 100644 index 00000000000..3e650014440 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_mongodbcollectioninfo.go @@ -0,0 +1,19 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + DataSize int64 `json:"dataSize"` + DatabaseName string `json:"databaseName"` + DocumentCount int64 `json:"documentCount"` + IsCapped bool `json:"isCapped"` + IsSystemCollection bool `json:"isSystemCollection"` + IsView bool `json:"isView"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + ShardKey *MongoDbShardKeyInfo `json:"shardKey,omitempty"` + SupportsSharding bool `json:"supportsSharding"` + ViewOf *string `json:"viewOf,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_mongodbcollectionprogress.go b/resource-manager/datamigration/2025-06-30/post/model_mongodbcollectionprogress.go new file mode 100644 index 00000000000..c59c42025e6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_mongodbcollectionprogress.go @@ -0,0 +1,102 @@ +package post + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbCollectionProgress{} + +type MongoDbCollectionProgress struct { + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbCollectionProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbCollectionProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbCollectionProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) 
SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbCollectionProgress{} + +func (s MongoDbCollectionProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbCollectionProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbCollectionProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbCollectionProgress: %+v", err) + } + + decoded["resultType"] = "Collection" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbCollectionProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_mongodbcollectionsettings.go b/resource-manager/datamigration/2025-06-30/post/model_mongodbcollectionsettings.go new file mode 100644 index 00000000000..6255e09a331 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_mongodbcollectionsettings.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionSettings struct { + CanDelete *bool `json:"canDelete,omitempty"` + ShardKey *MongoDbShardKeySetting `json:"shardKey,omitempty"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_mongodbconnectioninfo.go b/resource-manager/datamigration/2025-06-30/post/model_mongodbconnectioninfo.go new file mode 100644 index 00000000000..ab557aada0c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_mongodbconnectioninfo.go @@ -0,0 +1,64 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MongoDbConnectionInfo{} + +type MongoDbConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + ConnectionString string `json:"connectionString"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + EnforceSSL *bool `json:"enforceSSL,omitempty"` + Port *int64 `json:"port,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MongoDbConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MongoDbConnectionInfo{} + +func (s MongoDbConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MongoDbConnectionInfo + wrapped 
:= wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbConnectionInfo: %+v", err) + } + + decoded["type"] = "MongoDbConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_mongodbdatabaseinfo.go b/resource-manager/datamigration/2025-06-30/post/model_mongodbdatabaseinfo.go new file mode 100644 index 00000000000..11633990899 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_mongodbdatabaseinfo.go @@ -0,0 +1,14 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbDatabaseInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + Collections []MongoDbCollectionInfo `json:"collections"` + DataSize int64 `json:"dataSize"` + DocumentCount int64 `json:"documentCount"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + SupportsSharding bool `json:"supportsSharding"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_mongodbdatabaseprogress.go b/resource-manager/datamigration/2025-06-30/post/model_mongodbdatabaseprogress.go new file mode 100644 index 00000000000..bcf67812197 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_mongodbdatabaseprogress.go @@ -0,0 +1,166 @@ +package post + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ MongoDbProgress = MongoDbDatabaseProgress{} + +type MongoDbDatabaseProgress struct { + Collections *map[string]MongoDbProgress `json:"collections,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbDatabaseProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbDatabaseProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbDatabaseProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + 
return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbDatabaseProgress{} + +func (s MongoDbDatabaseProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbDatabaseProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbDatabaseProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbDatabaseProgress: %+v", err) + } + + decoded["resultType"] = "Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbDatabaseProgress: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MongoDbDatabaseProgress{} + +func (s *MongoDbDatabaseProgress) UnmarshalJSON(bytes []byte) error { + var decoded struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.BytesCopied = decoded.BytesCopied + s.DocumentsCopied = decoded.DocumentsCopied + s.ElapsedTime = decoded.ElapsedTime + 
s.Errors = decoded.Errors + s.EventsPending = decoded.EventsPending + s.EventsReplayed = decoded.EventsReplayed + s.LastEventTime = decoded.LastEventTime + s.LastReplayTime = decoded.LastReplayTime + s.Name = decoded.Name + s.QualifiedName = decoded.QualifiedName + s.ResultType = decoded.ResultType + s.State = decoded.State + s.TotalBytes = decoded.TotalBytes + s.TotalDocuments = decoded.TotalDocuments + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MongoDbDatabaseProgress into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["collections"]; ok { + var dictionaryTemp map[string]json.RawMessage + if err := json.Unmarshal(v, &dictionaryTemp); err != nil { + return fmt.Errorf("unmarshaling Collections into dictionary map[string]json.RawMessage: %+v", err) + } + + output := make(map[string]MongoDbProgress) + for key, val := range dictionaryTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling key %q field 'Collections' for 'MongoDbDatabaseProgress': %+v", key, err) + } + output[key] = impl + } + s.Collections = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_mongodbdatabasesettings.go b/resource-manager/datamigration/2025-06-30/post/model_mongodbdatabasesettings.go new file mode 100644 index 00000000000..0f8e0aa187c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_mongodbdatabasesettings.go @@ -0,0 +1,9 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbDatabaseSettings struct { + Collections map[string]MongoDbCollectionSettings `json:"collections"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_mongodberror.go b/resource-manager/datamigration/2025-06-30/post/model_mongodberror.go new file mode 100644 index 00000000000..bedb14f6bae --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_mongodberror.go @@ -0,0 +1,11 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbError struct { + Code *string `json:"code,omitempty"` + Count *int64 `json:"count,omitempty"` + Message *string `json:"message,omitempty"` + Type *MongoDbErrorType `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_mongodbmigrationprogress.go b/resource-manager/datamigration/2025-06-30/post/model_mongodbmigrationprogress.go new file mode 100644 index 00000000000..46c45542b12 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_mongodbmigrationprogress.go @@ -0,0 +1,103 @@ +package post + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// Compile-time assertion that MongoDbMigrationProgress satisfies the
// MongoDbProgress discriminated interface.
var _ MongoDbProgress = MongoDbMigrationProgress{}

// MongoDbMigrationProgress is the "Migration"-level progress report in the
// MongoDbProgress hierarchy: overall counters plus a per-database breakdown.
type MongoDbMigrationProgress struct {
	// Databases maps database name to its own progress object.
	Databases *map[string]MongoDbDatabaseProgress `json:"databases,omitempty"`

	// Fields inherited from MongoDbProgress

	BytesCopied     int64                   `json:"bytesCopied"`
	DocumentsCopied int64                   `json:"documentsCopied"`
	ElapsedTime     string                  `json:"elapsedTime"`
	Errors          map[string]MongoDbError `json:"errors"`
	EventsPending   int64                   `json:"eventsPending"`
	EventsReplayed  int64                   `json:"eventsReplayed"`
	LastEventTime   *string                 `json:"lastEventTime,omitempty"`
	LastReplayTime  *string                 `json:"lastReplayTime,omitempty"`
	Name            *string                 `json:"name,omitempty"`
	QualifiedName   *string                 `json:"qualifiedName,omitempty"`
	ResultType      ResultType              `json:"resultType"`
	State           MongoDbMigrationState   `json:"state"`
	TotalBytes      int64                   `json:"totalBytes"`
	TotalDocuments  int64                   `json:"totalDocuments"`
}

// MongoDbProgress projects this value onto the shared base-type fields,
// implementing the MongoDbProgress interface.
func (s MongoDbMigrationProgress) MongoDbProgress() BaseMongoDbProgressImpl {
	return BaseMongoDbProgressImpl{
		BytesCopied:     s.BytesCopied,
		DocumentsCopied: s.DocumentsCopied,
		ElapsedTime:     s.ElapsedTime,
		Errors:          s.Errors,
		EventsPending:   s.EventsPending,
		EventsReplayed:  s.EventsReplayed,
		LastEventTime:   s.LastEventTime,
		LastReplayTime:  s.LastReplayTime,
		Name:            s.Name,
		QualifiedName:   s.QualifiedName,
		ResultType:      s.ResultType,
		State:           s.State,
		TotalBytes:      s.TotalBytes,
		TotalDocuments:  s.TotalDocuments,
	}
}

// GetLastEventTimeAsTime parses LastEventTime using the RFC 3339 reference
// layout; returns (nil, nil) when the field is unset.
func (o *MongoDbMigrationProgress) GetLastEventTimeAsTime() (*time.Time, error) {
	if o.LastEventTime == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00")
}

// SetLastEventTimeAsTime stores the given instant as an RFC 3339 string.
func (o *MongoDbMigrationProgress) SetLastEventTimeAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.LastEventTime = &formatted
}

// GetLastReplayTimeAsTime parses LastReplayTime using the RFC 3339 reference
// layout; returns (nil, nil) when the field is unset.
func (o *MongoDbMigrationProgress) GetLastReplayTimeAsTime() (*time.Time, error) {
	if o.LastReplayTime == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00")
}

// SetLastReplayTimeAsTime stores the given instant as an RFC 3339 string.
func (o *MongoDbMigrationProgress) SetLastReplayTimeAsTime(input time.Time) {
	o.LastReplayTime = &formatted
}

var _ json.Marshaler = MongoDbMigrationProgress{}

// MarshalJSON serializes the value and then force-sets the "resultType"
// discriminator to "Migration" so the wire form always carries the correct
// discriminated-type marker regardless of the struct field's value.
func (s MongoDbMigrationProgress) MarshalJSON() ([]byte, error) {
	// The local wrapper type sheds this MarshalJSON method, avoiding
	// infinite recursion on the inner json.Marshal call.
	type wrapper MongoDbMigrationProgress
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MongoDbMigrationProgress: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MongoDbMigrationProgress: %+v", err)
	}

	decoded["resultType"] = "Migration"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MongoDbMigrationProgress: %+v", err)
	}

	return encoded, nil
}
// MongoDbMigrationSettings is the top-level configuration for a MongoDB
// migration task: source/target connections, per-database settings, and
// optional replication/throttling/RU-boost tuning.
type MongoDbMigrationSettings struct {
	// BoostRUs optionally raises RU provisioning during the migration —
	// presumably reverted afterwards; confirm against the service docs.
	BoostRUs *int64 `json:"boostRUs,omitempty"`
	// Databases maps database name to its migration settings (required).
	Databases map[string]MongoDbDatabaseSettings `json:"databases"`
	Replication *MongoDbReplication `json:"replication,omitempty"`
	Source      MongoDbConnectionInfo `json:"source"`
	Target      MongoDbConnectionInfo `json:"target"`
	Throttling  *MongoDbThrottlingSettings `json:"throttling,omitempty"`
}

// MongoDbProgress is the discriminated base type for MongoDB migration
// progress reports; the "resultType" JSON field selects the concrete type
// (Collection / Database / Migration).
type MongoDbProgress interface {
	MongoDbProgress() BaseMongoDbProgressImpl
}

var _ MongoDbProgress = BaseMongoDbProgressImpl{}

// BaseMongoDbProgressImpl carries the fields shared by every progress
// variant in the MongoDbProgress hierarchy.
type BaseMongoDbProgressImpl struct {
	BytesCopied     int64                   `json:"bytesCopied"`
	DocumentsCopied int64                   `json:"documentsCopied"`
	ElapsedTime     string                  `json:"elapsedTime"`
	Errors          map[string]MongoDbError `json:"errors"`
	EventsPending   int64                   `json:"eventsPending"`
	EventsReplayed  int64                   `json:"eventsReplayed"`
	LastEventTime   *string                 `json:"lastEventTime,omitempty"`
	LastReplayTime  *string                 `json:"lastReplayTime,omitempty"`
	Name            *string                 `json:"name,omitempty"`
	QualifiedName   *string                 `json:"qualifiedName,omitempty"`
	ResultType      ResultType              `json:"resultType"`
	State           MongoDbMigrationState   `json:"state"`
	TotalBytes      int64                   `json:"totalBytes"`
	TotalDocuments  int64                   `json:"totalDocuments"`
}

// MongoDbProgress returns the base-type view of itself.
func (s BaseMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl {
	return s
}

var _ MongoDbProgress = RawMongoDbProgressImpl{}

// RawMongoDbProgressImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawMongoDbProgressImpl struct {
	mongoDbProgress BaseMongoDbProgressImpl
	Type            string
	Values          map[string]interface{}
}

// MongoDbProgress exposes the base fields parsed from the raw payload.
func (s RawMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl {
	return s.mongoDbProgress
}

// UnmarshalMongoDbProgressImplementation inspects the "resultType"
// discriminator and decodes into the matching concrete progress type.
// Unknown discriminator values fall back to RawMongoDbProgressImpl so the
// payload is preserved rather than dropped. A nil input yields (nil, nil).
func UnmarshalMongoDbProgressImplementation(input []byte) (MongoDbProgress, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling MongoDbProgress into map[string]interface: %+v", err)
	}

	var value string
	if v, ok := temp["resultType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	// Discriminator comparisons are case-insensitive by design.
	if strings.EqualFold(value, "Collection") {
		var out MongoDbCollectionProgress
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MongoDbCollectionProgress: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Database") {
		var out MongoDbDatabaseProgress
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MongoDbDatabaseProgress: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migration") {
		var out MongoDbMigrationProgress
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MongoDbMigrationProgress: %+v", err)
		}
		return out, nil
	}

	var parent BaseMongoDbProgressImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseMongoDbProgressImpl: %+v", err)
	}

	return RawMongoDbProgressImpl{
		mongoDbProgress: parent,
		Type:            value,
		Values:          temp,
	}, nil

}
// MongoDbShardKeyField is a single field of a shard key: the field name and
// its ordering within the key.
type MongoDbShardKeyField struct {
	Name  string               `json:"name"`
	Order MongoDbShardKeyOrder `json:"order"`
}

// MongoDbShardKeyInfo describes an existing shard key as reported by the
// service; IsUnique is required here (contrast MongoDbShardKeySetting,
// where it is optional).
type MongoDbShardKeyInfo struct {
	Fields   []MongoDbShardKeyField `json:"fields"`
	IsUnique bool                   `json:"isUnique"`
}

// MongoDbShardKeySetting is the request-side shape for configuring a shard
// key; IsUnique is optional, unlike the response-side MongoDbShardKeyInfo.
type MongoDbShardKeySetting struct {
	Fields   []MongoDbShardKeyField `json:"fields"`
	IsUnique *bool                  `json:"isUnique,omitempty"`
}

// MongoDbThrottlingSettings bounds the resources a migration may consume on
// the machine running it.
type MongoDbThrottlingSettings struct {
	// MaxParallelism caps concurrent work items.
	MaxParallelism *int64 `json:"maxParallelism,omitempty"`
	// MinFreeCPU: note the JSON key is "minFreeCpu" (lower-case "pu") while
	// the Go name follows initialism casing — keep them in sync if edited.
	MinFreeCPU      *int64 `json:"minFreeCpu,omitempty"`
	MinFreeMemoryMb *int64 `json:"minFreeMemoryMb,omitempty"`
}
// Compile-time assertion that MySqlConnectionInfo implements the
// ConnectionInfo discriminated interface.
var _ ConnectionInfo = MySqlConnectionInfo{}

// MySqlConnectionInfo describes how to reach a MySQL server; the "type"
// discriminator on the wire is "MySqlConnectionInfo".
type MySqlConnectionInfo struct {
	AdditionalSettings *string             `json:"additionalSettings,omitempty"`
	Authentication     *AuthenticationType `json:"authentication,omitempty"`
	DataSource         *string             `json:"dataSource,omitempty"`
	EncryptConnection  *bool               `json:"encryptConnection,omitempty"`
	Port               int64               `json:"port"`
	ServerName         string              `json:"serverName"`

	// Fields inherited from ConnectionInfo

	Password *string `json:"password,omitempty"`
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

// ConnectionInfo projects this value onto the shared base-type fields.
func (s MySqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
	return BaseConnectionInfoImpl{
		Password: s.Password,
		Type:     s.Type,
		UserName: s.UserName,
	}
}

var _ json.Marshaler = MySqlConnectionInfo{}

// MarshalJSON serializes the value and force-sets the "type" discriminator
// so the payload always identifies itself as MySqlConnectionInfo.
func (s MySqlConnectionInfo) MarshalJSON() ([]byte, error) {
	// Local wrapper type drops this method to avoid marshaling recursion.
	type wrapper MySqlConnectionInfo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MySqlConnectionInfo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MySqlConnectionInfo: %+v", err)
	}

	decoded["type"] = "MySqlConnectionInfo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MySqlConnectionInfo: %+v", err)
	}

	return encoded, nil
}
// NameAvailabilityRequest is the payload for a check-name-availability call:
// the candidate resource name and its resource type.
type NameAvailabilityRequest struct {
	Name *string `json:"name,omitempty"`
	Type *string `json:"type,omitempty"`
}

// NameAvailabilityResponse reports whether a name is available and, when it
// is not, why (Reason) with a human-readable Message.
type NameAvailabilityResponse struct {
	Message       *string                 `json:"message,omitempty"`
	NameAvailable *bool                   `json:"nameAvailable,omitempty"`
	Reason        *NameCheckFailureReason `json:"reason,omitempty"`
}

// ODataError is the service's OData-style error shape; Details nests
// further ODataError values recursively.
type ODataError struct {
	Code    *string       `json:"code,omitempty"`
	Details *[]ODataError `json:"details,omitempty"`
	Message *string       `json:"message,omitempty"`
}
// Compile-time assertion that OracleConnectionInfo implements the
// ConnectionInfo discriminated interface.
var _ ConnectionInfo = OracleConnectionInfo{}

// OracleConnectionInfo describes how to reach an Oracle server; the "type"
// discriminator on the wire is "OracleConnectionInfo". Note DataSource is
// required here while ServerName/Port are optional — the inverse of the
// MySQL/PostgreSQL connection shapes in this package.
type OracleConnectionInfo struct {
	Authentication *AuthenticationType `json:"authentication,omitempty"`
	DataSource     string              `json:"dataSource"`
	Port           *int64              `json:"port,omitempty"`
	ServerName     *string             `json:"serverName,omitempty"`
	ServerVersion  *string             `json:"serverVersion,omitempty"`

	// Fields inherited from ConnectionInfo

	Password *string `json:"password,omitempty"`
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

// ConnectionInfo projects this value onto the shared base-type fields.
func (s OracleConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
	return BaseConnectionInfoImpl{
		Password: s.Password,
		Type:     s.Type,
		UserName: s.UserName,
	}
}

var _ json.Marshaler = OracleConnectionInfo{}

// MarshalJSON serializes the value and force-sets the "type" discriminator
// so the payload always identifies itself as OracleConnectionInfo.
func (s OracleConnectionInfo) MarshalJSON() ([]byte, error) {
	// Local wrapper type drops this method to avoid marshaling recursion.
	type wrapper OracleConnectionInfo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling OracleConnectionInfo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling OracleConnectionInfo: %+v", err)
	}

	decoded["type"] = "OracleConnectionInfo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling OracleConnectionInfo: %+v", err)
	}

	return encoded, nil
}
// OrphanedUserInfo identifies a database user with no matching login,
// reported during SQL migration assessment.
type OrphanedUserInfo struct {
	DatabaseName *string `json:"databaseName,omitempty"`
	Name         *string `json:"name,omitempty"`
}

// Compile-time assertion that PostgreSqlConnectionInfo implements the
// ConnectionInfo discriminated interface.
var _ ConnectionInfo = PostgreSqlConnectionInfo{}

// PostgreSqlConnectionInfo describes how to reach a PostgreSQL server; the
// "type" discriminator on the wire is "PostgreSqlConnectionInfo".
type PostgreSqlConnectionInfo struct {
	AdditionalSettings     *string             `json:"additionalSettings,omitempty"`
	Authentication         *AuthenticationType `json:"authentication,omitempty"`
	DataSource             *string             `json:"dataSource,omitempty"`
	DatabaseName           *string             `json:"databaseName,omitempty"`
	EncryptConnection      *bool               `json:"encryptConnection,omitempty"`
	Port                   int64               `json:"port"`
	ServerBrandVersion     *string             `json:"serverBrandVersion,omitempty"`
	ServerName             string              `json:"serverName"`
	ServerVersion          *string             `json:"serverVersion,omitempty"`
	TrustServerCertificate *bool               `json:"trustServerCertificate,omitempty"`

	// Fields inherited from ConnectionInfo

	Password *string `json:"password,omitempty"`
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

// ConnectionInfo projects this value onto the shared base-type fields.
func (s PostgreSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
	return BaseConnectionInfoImpl{
		Password: s.Password,
		Type:     s.Type,
		UserName: s.UserName,
	}
}

var _ json.Marshaler = PostgreSqlConnectionInfo{}

// MarshalJSON serializes the value and force-sets the "type" discriminator
// so the payload always identifies itself as PostgreSqlConnectionInfo.
func (s PostgreSqlConnectionInfo) MarshalJSON() ([]byte, error) {
	// Local wrapper type drops this method to avoid marshaling recursion.
	type wrapper PostgreSqlConnectionInfo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling PostgreSqlConnectionInfo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling PostgreSqlConnectionInfo: %+v", err)
	}

	decoded["type"] = "PostgreSqlConnectionInfo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling PostgreSqlConnectionInfo: %+v", err)
	}

	return encoded, nil
}
// ProjectTask is an ARM resource envelope whose Properties field is the
// polymorphic ProjectTaskProperties interface, requiring the custom
// UnmarshalJSON below.
type ProjectTask struct {
	Etag       *string                `json:"etag,omitempty"`
	Id         *string                `json:"id,omitempty"`
	Name       *string                `json:"name,omitempty"`
	Properties ProjectTaskProperties  `json:"properties"`
	SystemData *systemdata.SystemData `json:"systemData,omitempty"`
	Type       *string                `json:"type,omitempty"`
}

var _ json.Unmarshaler = &ProjectTask{}

// UnmarshalJSON decodes the plain fields directly, then routes the raw
// "properties" payload through the discriminated-union unmarshaller so the
// concrete ProjectTaskProperties implementation is selected by its taskType.
func (s *ProjectTask) UnmarshalJSON(bytes []byte) error {
	// An anonymous struct mirrors every field except Properties; decoding
	// the interface field directly would fail, so it is handled separately.
	var decoded struct {
		Etag       *string                `json:"etag,omitempty"`
		Id         *string                `json:"id,omitempty"`
		Name       *string                `json:"name,omitempty"`
		SystemData *systemdata.SystemData `json:"systemData,omitempty"`
		Type       *string                `json:"type,omitempty"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Etag = decoded.Etag
	s.Id = decoded.Id
	s.Name = decoded.Name
	s.SystemData = decoded.SystemData
	s.Type = decoded.Type

	// Re-read the payload as raw messages to extract "properties" untouched.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ProjectTask into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["properties"]; ok {
		impl, err := UnmarshalProjectTaskPropertiesImplementation(v)
		if err != nil {
			return fmt.Errorf("unmarshaling field 'Properties' for 'ProjectTask': %+v", err)
		}
		s.Properties = impl
	}

	return nil
}
// ProjectTaskProperties is the discriminated base type for all DMS task
// property payloads; the "taskType" JSON field selects the concrete type.
type ProjectTaskProperties interface {
	ProjectTaskProperties() BaseProjectTaskPropertiesImpl
}

var _ ProjectTaskProperties = BaseProjectTaskPropertiesImpl{}

// BaseProjectTaskPropertiesImpl carries the fields shared by every task
// properties variant. Commands is itself a slice of a discriminated type,
// hence the custom UnmarshalJSON below.
type BaseProjectTaskPropertiesImpl struct {
	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the base-type view of itself.
func (s BaseProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return s
}

var _ ProjectTaskProperties = RawProjectTaskPropertiesImpl{}

// RawProjectTaskPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawProjectTaskPropertiesImpl struct {
	projectTaskProperties BaseProjectTaskPropertiesImpl
	Type                  string
	Values                map[string]interface{}
}

// ProjectTaskProperties exposes the base fields parsed from the raw payload.
func (s RawProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return s.projectTaskProperties
}

var _ json.Unmarshaler = &BaseProjectTaskPropertiesImpl{}

// UnmarshalJSON decodes the plain fields directly, then decodes each element
// of "commands" through the CommandProperties discriminated unmarshaller.
func (s *BaseProjectTaskPropertiesImpl) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		ClientData *map[string]string `json:"clientData,omitempty"`
		Errors     *[]ODataError      `json:"errors,omitempty"`
		State      *TaskState         `json:"state,omitempty"`
		TaskType   TaskType           `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling BaseProjectTaskPropertiesImpl into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'BaseProjectTaskPropertiesImpl': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}

// UnmarshalProjectTaskPropertiesImplementation inspects the "taskType"
// discriminator (case-insensitively) and decodes into the matching concrete
// task-properties type. Unknown discriminator values fall back to
// RawProjectTaskPropertiesImpl so the payload is preserved. A nil input
// yields (nil, nil).
func UnmarshalProjectTaskPropertiesImplementation(input []byte) (ProjectTaskProperties, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling ProjectTaskProperties into map[string]interface: %+v", err)
	}

	var value string
	if v, ok := temp["taskType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "Connect.MongoDb") {
		var out ConnectToMongoDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToMongoDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.MySql") {
		var out ConnectToSourceMySqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceMySqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.Oracle.Sync") {
		var out ConnectToSourceOracleSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceOracleSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.PostgreSql.Sync") {
		var out ConnectToSourcePostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.SqlServer.Sync") {
		var out ConnectToSourceSqlServerSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	// NOTE: more-specific discriminators (e.g. "ConnectToSource.SqlServer.Sync")
	// are checked before their prefixes ("ConnectToSource.SqlServer"); keep
	// that ordering if branches are regenerated.
	if strings.EqualFold(value, "ConnectToSource.SqlServer") {
		var out ConnectToSourceSqlServerTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureDbForMySql") {
		var out ConnectToTargetAzureDbForMySqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureDbForPostgreSql.Sync") {
		var out ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync") {
		var out ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.SqlDb") {
		var out ConnectToTargetSqlDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI.Sync.LRS") {
		var out ConnectToTargetSqlMISyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMISyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI") {
		var out ConnectToTargetSqlMITaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMITaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.SqlDb.Sync") {
		var out ConnectToTargetSqlSqlDbSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetTDECertificates.Sql") {
		var out GetTdeCertificatesSqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetTdeCertificatesSqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTablesMySql") {
		var out GetUserTablesMySqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesMySqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTablesOracle") {
		var out GetUserTablesOracleTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesOracleTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTablesPostgreSql") {
		var out GetUserTablesPostgreSqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesPostgreSqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTables.AzureSqlDb.Sync") {
		var out GetUserTablesSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTables.Sql") {
		var out GetUserTablesSqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.MongoDb") {
		var out MigrateMongoDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMongoDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql") {
		var out MigrateMySqlAzureDbForMySqlOfflineTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql.Sync") {
		var out MigrateMySqlAzureDbForMySqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.Oracle.AzureDbForPostgreSql.Sync") {
		var out MigrateOracleAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2") {
		var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDb.Sync") {
		var out MigrateSqlServerSqlDbSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.SqlDb") {
		var out MigrateSqlServerSqlDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS") {
		var out MigrateSqlServerSqlMISyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI") {
		var out MigrateSqlServerSqlMITaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.Ssis") {
		var out MigrateSsisTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.SqlDb.Sync") {
		var out ValidateMigrationInputSqlServerSqlDbSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS") {
		var out ValidateMigrationInputSqlServerSqlMISyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI") {
		var out ValidateMigrationInputSqlServerSqlMITaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Validate.MongoDb") {
		var out ValidateMongoDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMongoDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Validate.Oracle.AzureDbPostgreSql.Sync") {
		var out ValidateOracleAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	var parent BaseProjectTaskPropertiesImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseProjectTaskPropertiesImpl: %+v", err)
	}

	return RawProjectTaskPropertiesImpl{
		projectTaskProperties: parent,
		Type:                  value,
		Values:                temp,
	}, nil

}
See NOTICE.txt in the project root for license information. + +type QueryAnalysisValidationResult struct { + QueryResults *QueryExecutionResult `json:"queryResults,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_queryexecutionresult.go b/resource-manager/datamigration/2025-06-30/post/model_queryexecutionresult.go new file mode 100644 index 00000000000..99458e53e8d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_queryexecutionresult.go @@ -0,0 +1,11 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QueryExecutionResult struct { + QueryText *string `json:"queryText,omitempty"` + SourceResult *ExecutionStatistics `json:"sourceResult,omitempty"` + StatementsInBatch *int64 `json:"statementsInBatch,omitempty"` + TargetResult *ExecutionStatistics `json:"targetResult,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_reportableexception.go b/resource-manager/datamigration/2025-06-30/post/model_reportableexception.go new file mode 100644 index 00000000000..96cb08aecce --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_reportableexception.go @@ -0,0 +1,13 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ReportableException struct { + ActionableMessage *string `json:"actionableMessage,omitempty"` + FilePath *string `json:"filePath,omitempty"` + HResult *int64 `json:"hResult,omitempty"` + LineNumber *string `json:"lineNumber,omitempty"` + Message *string `json:"message,omitempty"` + StackTrace *string `json:"stackTrace,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_schemacomparisonvalidationresult.go b/resource-manager/datamigration/2025-06-30/post/model_schemacomparisonvalidationresult.go new file mode 100644 index 00000000000..ac94f24270d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_schemacomparisonvalidationresult.go @@ -0,0 +1,11 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SchemaComparisonValidationResult struct { + SchemaDifferences *SchemaComparisonValidationResultType `json:"schemaDifferences,omitempty"` + SourceDatabaseObjectCount *map[string]int64 `json:"sourceDatabaseObjectCount,omitempty"` + TargetDatabaseObjectCount *map[string]int64 `json:"targetDatabaseObjectCount,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_schemacomparisonvalidationresulttype.go b/resource-manager/datamigration/2025-06-30/post/model_schemacomparisonvalidationresulttype.go new file mode 100644 index 00000000000..667a05006d5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_schemacomparisonvalidationresulttype.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SchemaComparisonValidationResultType struct { + ObjectName *string `json:"objectName,omitempty"` + ObjectType *ObjectType `json:"objectType,omitempty"` + UpdateAction *UpdateActionType `json:"updateAction,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_selectedcertificateinput.go b/resource-manager/datamigration/2025-06-30/post/model_selectedcertificateinput.go new file mode 100644 index 00000000000..e5670d0910b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_selectedcertificateinput.go @@ -0,0 +1,9 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SelectedCertificateInput struct { + CertificateName string `json:"certificateName"` + Password string `json:"password"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_serverproperties.go b/resource-manager/datamigration/2025-06-30/post/model_serverproperties.go new file mode 100644 index 00000000000..47d2c498b30 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_serverproperties.go @@ -0,0 +1,13 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServerProperties struct { + ServerDatabaseCount *int64 `json:"serverDatabaseCount,omitempty"` + ServerEdition *string `json:"serverEdition,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerOperatingSystemVersion *string `json:"serverOperatingSystemVersion,omitempty"` + ServerPlatform *string `json:"serverPlatform,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_sqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/post/model_sqlconnectioninfo.go new file mode 100644 index 00000000000..1fe5ab1ce9f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_sqlconnectioninfo.go @@ -0,0 +1,64 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = SqlConnectionInfo{} + +type SqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource string `json:"dataSource"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Platform *SqlSourcePlatform `json:"platform,omitempty"` + Port *int64 `json:"port,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s SqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + 
UserName: s.UserName, + } +} + +var _ json.Marshaler = SqlConnectionInfo{} + +func (s SqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper SqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling SqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling SqlConnectionInfo: %+v", err) + } + + decoded["type"] = "SqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling SqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_sqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_sqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..2a462a6a620 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_sqlserversqlmisynctaskinput.go @@ -0,0 +1,13 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_ssismigrationinfo.go b/resource-manager/datamigration/2025-06-30/post/model_ssismigrationinfo.go new file mode 100644 index 00000000000..f2fcf74a253 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_ssismigrationinfo.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SsisMigrationInfo struct { + EnvironmentOverwriteOption *SsisMigrationOverwriteOption `json:"environmentOverwriteOption,omitempty"` + ProjectOverwriteOption *SsisMigrationOverwriteOption `json:"projectOverwriteOption,omitempty"` + SsisStoreType *SsisStoreType `json:"ssisStoreType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_startmigrationscenarioserverroleresult.go b/resource-manager/datamigration/2025-06-30/post/model_startmigrationscenarioserverroleresult.go new file mode 100644 index 00000000000..c473e42e761 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_startmigrationscenarioserverroleresult.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type StartMigrationScenarioServerRoleResult struct { + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Name *string `json:"name,omitempty"` + State *MigrationState `json:"state,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_syncmigrationdatabaseerrorevent.go b/resource-manager/datamigration/2025-06-30/post/model_syncmigrationdatabaseerrorevent.go new file mode 100644 index 00000000000..a04df7d1556 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_syncmigrationdatabaseerrorevent.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SyncMigrationDatabaseErrorEvent struct { + EventText *string `json:"eventText,omitempty"` + EventTypeString *string `json:"eventTypeString,omitempty"` + TimestampString *string `json:"timestampString,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..68986753c67 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlDbSyncTaskProperties struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..5c05c6fb9b4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMISyncTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..4432d81ebe3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmisynctaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlMISyncTaskProperties struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMISyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmitaskinput.go new file mode 100644 index 00000000000..3891899551e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmitaskinput.go @@ -0,0 +1,14 @@ +package post + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMITaskInput struct { + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..354f0c1625f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmitaskoutput.go @@ -0,0 +1,15 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateMigrationInputSqlServerSqlMITaskOutput struct { + BackupFolderErrors *[]ReportableException `json:"backupFolderErrors,omitempty"` + BackupShareCredentialsErrors *[]ReportableException `json:"backupShareCredentialsErrors,omitempty"` + BackupStorageAccountErrors *[]ReportableException `json:"backupStorageAccountErrors,omitempty"` + DatabaseBackupInfo *DatabaseBackupInfo `json:"databaseBackupInfo,omitempty"` + ExistingBackupErrors *[]ReportableException `json:"existingBackupErrors,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + RestoreDatabaseNameErrors *[]ReportableException `json:"restoreDatabaseNameErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..54d88141580 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_validatemigrationinputsqlserversqlmitaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +type ValidateMigrationInputSqlServerSqlMITaskProperties struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var 
decoded struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_validatemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_validatemongodbtaskproperties.go new file mode 100644 index 00000000000..4628f815a73 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_validatemongodbtaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ValidateMongoDbTaskProperties{} + +type ValidateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMongoDbTaskProperties{} + +func (s ValidateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMongoDbTaskProperties{} + +func (s *ValidateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` 
+ State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_validateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/post/model_validateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..f502d487262 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_validateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package post + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ValidateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.Oracle.AzureDbPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { 
+ var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_validateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_validateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..649f37019a1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_validateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,8 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateOracleAzureDbPostgreSqlSyncTaskOutput struct { + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_validatesyncmigrationinputsqlservertaskinput.go b/resource-manager/datamigration/2025-06-30/post/model_validatesyncmigrationinputsqlservertaskinput.go new file mode 100644 index 00000000000..e640725798b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_validatesyncmigrationinputsqlservertaskinput.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateSyncMigrationInputSqlServerTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_validatesyncmigrationinputsqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/post/model_validatesyncmigrationinputsqlservertaskoutput.go new file mode 100644 index 00000000000..69db3adfdfb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_validatesyncmigrationinputsqlservertaskoutput.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateSyncMigrationInputSqlServerTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_validationerror.go b/resource-manager/datamigration/2025-06-30/post/model_validationerror.go new file mode 100644 index 00000000000..ead8edecd34 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_validationerror.go @@ -0,0 +1,9 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidationError struct { + Severity *Severity `json:"severity,omitempty"` + Text *string `json:"text,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/model_waitstatistics.go b/resource-manager/datamigration/2025-06-30/post/model_waitstatistics.go new file mode 100644 index 00000000000..e74d55dc8ad --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/model_waitstatistics.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type WaitStatistics struct { + WaitCount *int64 `json:"waitCount,omitempty"` + WaitTimeMs *float64 `json:"waitTimeMs,omitempty"` + WaitType *string `json:"waitType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/post/version.go b/resource-manager/datamigration/2025-06-30/post/version.go new file mode 100644 index 00000000000..bb9c1a5d904 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/post/version.go @@ -0,0 +1,10 @@ +package post + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/post/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/projectresource/README.md b/resource-manager/datamigration/2025-06-30/projectresource/README.md new file mode 100644 index 00000000000..9ee81ca1a3a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/projectresource/README.md @@ -0,0 +1,111 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/projectresource` Documentation + +The `projectresource` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/projectresource" +``` + + +### Client Initialization + +```go +client := projectresource.NewProjectResourceClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `ProjectResourceClient.ProjectsCreateOrUpdate` + +```go +ctx := context.TODO() +id := projectresource.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +payload := projectresource.Project{ + // ... 
+} + + +read, err := client.ProjectsCreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ProjectResourceClient.ProjectsDelete` + +```go +ctx := context.TODO() +id := projectresource.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +read, err := client.ProjectsDelete(ctx, id, projectresource.DefaultProjectsDeleteOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ProjectResourceClient.ProjectsGet` + +```go +ctx := context.TODO() +id := projectresource.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +read, err := client.ProjectsGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ProjectResourceClient.ProjectsList` + +```go +ctx := context.TODO() +id := projectresource.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +// alternatively `client.ProjectsList(ctx, id)` can be used to do batched pagination +items, err := client.ProjectsListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ProjectResourceClient.ProjectsUpdate` + +```go +ctx := context.TODO() +id := projectresource.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +payload := projectresource.Project{ + // ... 
+} + + +read, err := client.ProjectsUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/datamigration/2025-06-30/projectresource/client.go b/resource-manager/datamigration/2025-06-30/projectresource/client.go new file mode 100644 index 00000000000..cd47b992f3f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/projectresource/client.go @@ -0,0 +1,26 @@ +package projectresource + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectResourceClient struct { + Client *resourcemanager.Client +} + +func NewProjectResourceClientWithBaseURI(sdkApi sdkEnv.Api) (*ProjectResourceClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "projectresource", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating ProjectResourceClient: %+v", err) + } + + return &ProjectResourceClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/projectresource/constants.go b/resource-manager/datamigration/2025-06-30/projectresource/constants.go new file mode 100644 index 00000000000..cf99d1198b3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/projectresource/constants.go @@ -0,0 +1,242 @@ +package projectresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AuthenticationType string + +const ( + AuthenticationTypeActiveDirectoryIntegrated AuthenticationType = "ActiveDirectoryIntegrated" + AuthenticationTypeActiveDirectoryPassword AuthenticationType = "ActiveDirectoryPassword" + AuthenticationTypeNone AuthenticationType = "None" + AuthenticationTypeSqlAuthentication AuthenticationType = "SqlAuthentication" + AuthenticationTypeWindowsAuthentication AuthenticationType = "WindowsAuthentication" +) + +func PossibleValuesForAuthenticationType() []string { + return []string{ + string(AuthenticationTypeActiveDirectoryIntegrated), + string(AuthenticationTypeActiveDirectoryPassword), + string(AuthenticationTypeNone), + string(AuthenticationTypeSqlAuthentication), + string(AuthenticationTypeWindowsAuthentication), + } +} + +func (s *AuthenticationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthenticationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthenticationType(input string) (*AuthenticationType, error) { + vals := map[string]AuthenticationType{ + "activedirectoryintegrated": AuthenticationTypeActiveDirectoryIntegrated, + "activedirectorypassword": AuthenticationTypeActiveDirectoryPassword, + "none": AuthenticationTypeNone, + "sqlauthentication": AuthenticationTypeSqlAuthentication, + "windowsauthentication": AuthenticationTypeWindowsAuthentication, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthenticationType(input) + return &out, nil +} + +type ProjectProvisioningState string + +const ( + ProjectProvisioningStateDeleting ProjectProvisioningState = "Deleting" + ProjectProvisioningStateSucceeded ProjectProvisioningState = "Succeeded" +) + +func 
PossibleValuesForProjectProvisioningState() []string { + return []string{ + string(ProjectProvisioningStateDeleting), + string(ProjectProvisioningStateSucceeded), + } +} + +func (s *ProjectProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProjectProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProjectProvisioningState(input string) (*ProjectProvisioningState, error) { + vals := map[string]ProjectProvisioningState{ + "deleting": ProjectProvisioningStateDeleting, + "succeeded": ProjectProvisioningStateSucceeded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProjectProvisioningState(input) + return &out, nil +} + +type ProjectSourcePlatform string + +const ( + ProjectSourcePlatformMongoDb ProjectSourcePlatform = "MongoDb" + ProjectSourcePlatformMySQL ProjectSourcePlatform = "MySQL" + ProjectSourcePlatformPostgreSql ProjectSourcePlatform = "PostgreSql" + ProjectSourcePlatformSQL ProjectSourcePlatform = "SQL" + ProjectSourcePlatformUnknown ProjectSourcePlatform = "Unknown" +) + +func PossibleValuesForProjectSourcePlatform() []string { + return []string{ + string(ProjectSourcePlatformMongoDb), + string(ProjectSourcePlatformMySQL), + string(ProjectSourcePlatformPostgreSql), + string(ProjectSourcePlatformSQL), + string(ProjectSourcePlatformUnknown), + } +} + +func (s *ProjectSourcePlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProjectSourcePlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func 
parseProjectSourcePlatform(input string) (*ProjectSourcePlatform, error) { + vals := map[string]ProjectSourcePlatform{ + "mongodb": ProjectSourcePlatformMongoDb, + "mysql": ProjectSourcePlatformMySQL, + "postgresql": ProjectSourcePlatformPostgreSql, + "sql": ProjectSourcePlatformSQL, + "unknown": ProjectSourcePlatformUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProjectSourcePlatform(input) + return &out, nil +} + +type ProjectTargetPlatform string + +const ( + ProjectTargetPlatformAzureDbForMySql ProjectTargetPlatform = "AzureDbForMySql" + ProjectTargetPlatformAzureDbForPostgreSql ProjectTargetPlatform = "AzureDbForPostgreSql" + ProjectTargetPlatformMongoDb ProjectTargetPlatform = "MongoDb" + ProjectTargetPlatformSQLDB ProjectTargetPlatform = "SQLDB" + ProjectTargetPlatformSQLMI ProjectTargetPlatform = "SQLMI" + ProjectTargetPlatformUnknown ProjectTargetPlatform = "Unknown" +) + +func PossibleValuesForProjectTargetPlatform() []string { + return []string{ + string(ProjectTargetPlatformAzureDbForMySql), + string(ProjectTargetPlatformAzureDbForPostgreSql), + string(ProjectTargetPlatformMongoDb), + string(ProjectTargetPlatformSQLDB), + string(ProjectTargetPlatformSQLMI), + string(ProjectTargetPlatformUnknown), + } +} + +func (s *ProjectTargetPlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProjectTargetPlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProjectTargetPlatform(input string) (*ProjectTargetPlatform, error) { + vals := map[string]ProjectTargetPlatform{ + "azuredbformysql": ProjectTargetPlatformAzureDbForMySql, + "azuredbforpostgresql": ProjectTargetPlatformAzureDbForPostgreSql, + "mongodb": ProjectTargetPlatformMongoDb, 
+ "sqldb": ProjectTargetPlatformSQLDB, + "sqlmi": ProjectTargetPlatformSQLMI, + "unknown": ProjectTargetPlatformUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProjectTargetPlatform(input) + return &out, nil +} + +type SqlSourcePlatform string + +const ( + SqlSourcePlatformSqlOnPrem SqlSourcePlatform = "SqlOnPrem" +) + +func PossibleValuesForSqlSourcePlatform() []string { + return []string{ + string(SqlSourcePlatformSqlOnPrem), + } +} + +func (s *SqlSourcePlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSqlSourcePlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSqlSourcePlatform(input string) (*SqlSourcePlatform, error) { + vals := map[string]SqlSourcePlatform{ + "sqlonprem": SqlSourcePlatformSqlOnPrem, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SqlSourcePlatform(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/projectresource/id_project.go b/resource-manager/datamigration/2025-06-30/projectresource/id_project.go new file mode 100644 index 00000000000..09cc4be079c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/projectresource/id_project.go @@ -0,0 +1,139 @@ +package projectresource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ProjectId{}) +} + +var _ resourceids.ResourceId = &ProjectId{} + +// ProjectId is a struct representing the Resource ID for a Project +type ProjectId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string +} + +// NewProjectID returns a new ProjectId struct +func NewProjectID(subscriptionId string, resourceGroupName string, serviceName string, projectName string) ProjectId { + return ProjectId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + } +} + +// ParseProjectID parses 'input' into a ProjectId +func ParseProjectID(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseProjectIDInsensitively parses 'input' case-insensitively into a ProjectId +// note: this method should only be used for API response data and not user input +func ParseProjectIDInsensitively(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ProjectId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + 
if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + return nil +} + +// ValidateProjectID checks that 'input' can be parsed as a Project ID +func ValidateProjectID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseProjectID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Project ID +func (id ProjectId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Project ID +func (id ProjectId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + } +} + +// String 
returns a human-readable description of this Project ID +func (id ProjectId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + } + return fmt.Sprintf("Project (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/projectresource/id_project_test.go b/resource-manager/datamigration/2025-06-30/projectresource/id_project_test.go new file mode 100644 index 00000000000..c961c1d4e0d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/projectresource/id_project_test.go @@ -0,0 +1,327 @@ +package projectresource + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &ProjectId{} + +func TestNewProjectID(t *testing.T) { + id := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } +} + +func TestFormatProjectID(t *testing.T) { + actual := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseProjectID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if 
actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestParseProjectIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: 
&ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != 
v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestSegmentsForProjectId(t *testing.T) { + segments := ProjectId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ProjectId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/projectresource/id_service.go b/resource-manager/datamigration/2025-06-30/projectresource/id_service.go new file mode 100644 index 00000000000..049198dddc6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/projectresource/id_service.go @@ -0,0 +1,130 @@ +package projectresource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
func init() {
	// Register this Resource ID format so that IDs returned by the API with
	// inconsistent casing can be re-cased into this canonical form.
	recaser.RegisterResourceId(&ServiceId{})
}

var _ resourceids.ResourceId = &ServiceId{}

// ServiceId is a struct representing the Resource ID for a Service
type ServiceId struct {
	SubscriptionId    string
	ResourceGroupName string
	ServiceName       string
}

// NewServiceID returns a new ServiceId struct
func NewServiceID(subscriptionId string, resourceGroupName string, serviceName string) ServiceId {
	return ServiceId{
		SubscriptionId:    subscriptionId,
		ResourceGroupName: resourceGroupName,
		ServiceName:       serviceName,
	}
}

// ParseServiceID parses 'input' into a ServiceId
func ParseServiceID(input string) (*ServiceId, error) {
	parser := resourceids.NewParserFromResourceIdType(&ServiceId{})
	parsed, err := parser.Parse(input, false)
	if err != nil {
		return nil, fmt.Errorf("parsing %q: %+v", input, err)
	}

	id := ServiceId{}
	if err = id.FromParseResult(*parsed); err != nil {
		return nil, err
	}

	return &id, nil
}

// ParseServiceIDInsensitively parses 'input' case-insensitively into a ServiceId
// note: this method should only be used for API response data and not user input
func ParseServiceIDInsensitively(input string) (*ServiceId, error) {
	parser := resourceids.NewParserFromResourceIdType(&ServiceId{})
	parsed, err := parser.Parse(input, true)
	if err != nil {
		return nil, fmt.Errorf("parsing %q: %+v", input, err)
	}

	id := ServiceId{}
	if err = id.FromParseResult(*parsed); err != nil {
		return nil, err
	}

	return &id, nil
}

// FromParseResult populates this ServiceId from a ParseResult, returning an
// error if any of the expected segments is absent from the parsed input.
func (id *ServiceId) FromParseResult(input resourceids.ParseResult) error {
	var ok bool

	if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input)
	}

	if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input)
	}

	if id.ServiceName, ok = input.Parsed["serviceName"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input)
	}

	return nil
}

// ValidateServiceID checks that 'input' can be parsed as a Service ID
func ValidateServiceID(input interface{}, key string) (warnings []string, errors []error) {
	v, ok := input.(string)
	if !ok {
		errors = append(errors, fmt.Errorf("expected %q to be a string", key))
		return
	}

	if _, err := ParseServiceID(v); err != nil {
		errors = append(errors, err)
	}

	return
}

// ID returns the formatted Service ID
func (id ServiceId) ID() string {
	fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s"
	return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName)
}

// Segments returns a slice of Resource ID Segments which comprise this Service ID
func (id ServiceId) Segments() []resourceids.Segment {
	return []resourceids.Segment{
		resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"),
		resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"),
		resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"),
		resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"),
		resourceids.StaticSegment("staticProviders", "providers", "providers"),
		resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"),
		resourceids.StaticSegment("staticServices", "services", "services"),
		resourceids.UserSpecifiedSegment("serviceName", "serviceName"),
	}
}

// String returns a human-readable description of this Service ID
// note: components are joined with newlines, matching the other generated IDs.
func (id ServiceId) String() string {
	components := []string{
		fmt.Sprintf("Subscription: %q", id.SubscriptionId),
		fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName),
		fmt.Sprintf("Service Name: %q", id.ServiceName),
	}
	return fmt.Sprintf("Service (%s)", strings.Join(components, "\n"))
}
diff --git
a/resource-manager/datamigration/2025-06-30/projectresource/id_service_test.go b/resource-manager/datamigration/2025-06-30/projectresource/id_service_test.go new file mode 100644 index 00000000000..76e86ac841c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/projectresource/id_service_test.go @@ -0,0 +1,282 @@ +package projectresource + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &ServiceId{} + +func TestNewServiceID(t *testing.T) { + id := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } +} + +func TestFormatServiceID(t *testing.T) { + actual := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseServiceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for 
ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestParseServiceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { 
+ // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + 
t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestSegmentsForServiceId(t *testing.T) { + segments := ServiceId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/projectresource/method_projectscreateorupdate.go b/resource-manager/datamigration/2025-06-30/projectresource/method_projectscreateorupdate.go new file mode 100644 index 00000000000..12aedec0328 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/projectresource/method_projectscreateorupdate.go @@ -0,0 +1,58 @@ +package projectresource + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectsCreateOrUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *Project +} + +// ProjectsCreateOrUpdate ... 
// ProjectsCreateOrUpdate issues a PUT to the Project resource's ID, marshalling
// 'input' as the request body, and unmarshals the response into result.Model.
func (c ProjectResourceClient) ProjectsCreateOrUpdate(ctx context.Context, id ProjectId, input Project) (result ProjectsCreateOrUpdateOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusCreated,
			http.StatusOK,
		},
		HttpMethod: http.MethodPut,
		Path:       id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	if err = req.Marshal(input); err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	// the response metadata is captured even on error so callers can inspect it
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	var model Project
	result.Model = &model
	if err = resp.Unmarshal(result.Model); err != nil {
		return
	}

	return
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/method_projectsdelete.go b/resource-manager/datamigration/2025-06-30/projectresource/method_projectsdelete.go
new file mode 100644
index 00000000000..3de22d4ba25
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/method_projectsdelete.go
@@ -0,0 +1,77 @@
package projectresource

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
type ProjectsDeleteOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
}

// ProjectsDeleteOperationOptions carries the optional query parameters for ProjectsDelete.
type ProjectsDeleteOperationOptions struct {
	DeleteRunningTasks *bool
}

func DefaultProjectsDeleteOperationOptions() ProjectsDeleteOperationOptions {
	return ProjectsDeleteOperationOptions{}
}

func (o ProjectsDeleteOperationOptions) ToHeaders() *client.Headers {
	out := client.Headers{}

	return &out
}

func (o ProjectsDeleteOperationOptions) ToOData() *odata.Query {
	out := odata.Query{}

	return &out
}

func (o ProjectsDeleteOperationOptions) ToQuery() *client.QueryParams {
	out := client.QueryParams{}
	// only appended when set, so the API default applies when the caller omits it
	if o.DeleteRunningTasks != nil {
		out.Append("deleteRunningTasks", fmt.Sprintf("%v", *o.DeleteRunningTasks))
	}
	return &out
}

// ProjectsDelete issues a DELETE to the Project resource's ID.
func (c ProjectResourceClient) ProjectsDelete(ctx context.Context, id ProjectId, options ProjectsDeleteOperationOptions) (result ProjectsDeleteOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusNoContent,
			http.StatusOK,
		},
		HttpMethod:    http.MethodDelete,
		OptionsObject: options,
		Path:          id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	return
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/method_projectsget.go b/resource-manager/datamigration/2025-06-30/projectresource/method_projectsget.go
new file mode 100644
index 00000000000..53664662bf1
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/method_projectsget.go
@@ -0,0 +1,53 @@
package projectresource

import (
	"context"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

type ProjectsGetOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *Project
}

// ProjectsGet issues a GET to the Project resource's ID and unmarshals the
// response body into result.Model.
func (c ProjectResourceClient) ProjectsGet(ctx context.Context, id ProjectId) (result ProjectsGetOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusOK,
		},
		HttpMethod: http.MethodGet,
		Path:       id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	var model Project
	result.Model = &model
	if err = resp.Unmarshal(result.Model); err != nil {
		return
	}

	return
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/method_projectslist.go b/resource-manager/datamigration/2025-06-30/projectresource/method_projectslist.go
new file mode 100644
index 00000000000..d487d8bef5e
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/method_projectslist.go
@@ -0,0 +1,105 @@
package projectresource

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
type ProjectsListOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *[]Project
}

type ProjectsListCompleteResult struct {
	LatestHttpResponse *http.Response
	Items              []Project
}

// ProjectsListCustomPager follows the API's nextLink to fetch subsequent pages.
type ProjectsListCustomPager struct {
	NextLink *odata.Link `json:"nextLink"`
}

func (p *ProjectsListCustomPager) NextPageLink() *odata.Link {
	// clear the link after handing it out so a stale value is never re-followed
	defer func() {
		p.NextLink = nil
	}()

	return p.NextLink
}

// ProjectsList issues a GET to the Service's /projects collection, following
// pagination, and returns the decoded "value" array in result.Model.
func (c ProjectResourceClient) ProjectsList(ctx context.Context, id ServiceId) (result ProjectsListOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusOK,
		},
		HttpMethod: http.MethodGet,
		Pager:      &ProjectsListCustomPager{},
		Path:       fmt.Sprintf("%s/projects", id.ID()),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.ExecutePaged(ctx)
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	var values struct {
		Values *[]Project `json:"value"`
	}
	if err = resp.Unmarshal(&values); err != nil {
		return
	}

	result.Model = values.Values

	return
}

// ProjectsListComplete retrieves all the results into a single object
func (c ProjectResourceClient) ProjectsListComplete(ctx context.Context, id ServiceId) (ProjectsListCompleteResult, error) {
	return c.ProjectsListCompleteMatchingPredicate(ctx, id, ProjectOperationPredicate{})
}

// ProjectsListCompleteMatchingPredicate retrieves all the results and then applies the predicate
func (c ProjectResourceClient) ProjectsListCompleteMatchingPredicate(ctx context.Context, id ServiceId, predicate ProjectOperationPredicate) (result ProjectsListCompleteResult, err error) {
	items := make([]Project, 0)

	resp, err := c.ProjectsList(ctx, id)
	if err != nil {
		// preserve the (possibly nil) response for callers inspecting failures
		result.LatestHttpResponse = resp.HttpResponse
		err = fmt.Errorf("loading results: %+v", err)
		return
	}
	if resp.Model != nil {
		for _, v := range *resp.Model {
			if predicate.Matches(v) {
				items = append(items, v)
			}
		}
	}

	result = ProjectsListCompleteResult{
		LatestHttpResponse: resp.HttpResponse,
		Items:              items,
	}
	return
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/method_projectsupdate.go b/resource-manager/datamigration/2025-06-30/projectresource/method_projectsupdate.go
new file mode 100644
index 00000000000..01c4a4e72f4
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/method_projectsupdate.go
@@ -0,0 +1,57 @@
package projectresource

import (
	"context"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

type ProjectsUpdateOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *Project
}

// ProjectsUpdate ...
// ProjectsUpdate issues a PATCH to the Project resource's ID, marshalling
// 'input' as the request body, and unmarshals the response into result.Model.
func (c ProjectResourceClient) ProjectsUpdate(ctx context.Context, id ProjectId, input Project) (result ProjectsUpdateOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusOK,
		},
		HttpMethod: http.MethodPatch,
		Path:       id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	if err = req.Marshal(input); err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	var model Project
	result.Model = &model
	if err = resp.Unmarshal(result.Model); err != nil {
		return
	}

	return
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/model_azureactivedirectoryapp.go b/resource-manager/datamigration/2025-06-30/projectresource/model_azureactivedirectoryapp.go
new file mode 100644
index 00000000000..fc62540797e
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/model_azureactivedirectoryapp.go
@@ -0,0 +1,11 @@
package projectresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
type AzureActiveDirectoryApp struct {
	AppKey                 *string `json:"appKey,omitempty"`
	ApplicationId          *string `json:"applicationId,omitempty"`
	IgnoreAzurePermissions *bool   `json:"ignoreAzurePermissions,omitempty"`
	TenantId               *string `json:"tenantId,omitempty"`
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/model_connectioninfo.go b/resource-manager/datamigration/2025-06-30/projectresource/model_connectioninfo.go
new file mode 100644
index 00000000000..0465da61fa2
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/model_connectioninfo.go
@@ -0,0 +1,117 @@
package projectresource

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ConnectionInfo is the discriminated parent type; the JSON "type" field selects
// the concrete implementation during unmarshalling.
type ConnectionInfo interface {
	ConnectionInfo() BaseConnectionInfoImpl
}

var _ ConnectionInfo = BaseConnectionInfoImpl{}

type BaseConnectionInfoImpl struct {
	Password *string `json:"password,omitempty"`
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

func (s BaseConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl {
	return s
}

var _ ConnectionInfo = RawConnectionInfoImpl{}

// RawConnectionInfoImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawConnectionInfoImpl struct {
	connectionInfo BaseConnectionInfoImpl
	Type           string
	Values         map[string]interface{}
}

func (s RawConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl {
	return s.connectionInfo
}

// UnmarshalConnectionInfoImplementation inspects the "type" discriminator and
// decodes 'input' into the matching concrete type, falling back to
// RawConnectionInfoImpl for unknown discriminator values.
func UnmarshalConnectionInfoImplementation(input []byte) (ConnectionInfo, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectionInfo into map[string]interface: %+v", err)
	}

	var value string
	if v, ok := temp["type"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "MiSqlConnectionInfo") {
		var out MiSqlConnectionInfo
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MiSqlConnectionInfo: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "mongoDbConnectionInfo") {
		var out MongoDbConnectionInfo
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MongoDbConnectionInfo: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "MySqlConnectionInfo") {
		var out MySqlConnectionInfo
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MySqlConnectionInfo: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "OracleConnectionInfo") {
		var out OracleConnectionInfo
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into OracleConnectionInfo: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "PostgreSqlConnectionInfo") {
		var out PostgreSqlConnectionInfo
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into PostgreSqlConnectionInfo: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "SqlConnectionInfo") {
		var out SqlConnectionInfo
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into SqlConnectionInfo: %+v", err)
		}
		return out, nil
	}

	var parent BaseConnectionInfoImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseConnectionInfoImpl: %+v", err)
	}

	return RawConnectionInfoImpl{
		connectionInfo: parent,
		Type:           value,
		Values:         temp,
	}, nil

}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/model_databaseinfo.go b/resource-manager/datamigration/2025-06-30/projectresource/model_databaseinfo.go
new file mode 100644
index 00000000000..3c6987ed670
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/model_databaseinfo.go
@@ -0,0 +1,8 @@
package projectresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

type DatabaseInfo struct {
	SourceDatabaseName string `json:"sourceDatabaseName"`
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/model_misqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/projectresource/model_misqlconnectioninfo.go
new file mode 100644
index 00000000000..9cb8f6683f0
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/model_misqlconnectioninfo.go
@@ -0,0 +1,54 @@
package projectresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
var _ ConnectionInfo = MiSqlConnectionInfo{}

type MiSqlConnectionInfo struct {
	ManagedInstanceResourceId string `json:"managedInstanceResourceId"`

	// Fields inherited from ConnectionInfo

	Password *string `json:"password,omitempty"`
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

func (s MiSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
	return BaseConnectionInfoImpl{
		Password: s.Password,
		Type:     s.Type,
		UserName: s.UserName,
	}
}

var _ json.Marshaler = MiSqlConnectionInfo{}

// MarshalJSON serialises the struct then forces the "type" discriminator so the
// payload always carries the correct value regardless of the Type field's contents.
func (s MiSqlConnectionInfo) MarshalJSON() ([]byte, error) {
	// the wrapper type drops this MarshalJSON method, avoiding infinite recursion
	type wrapper MiSqlConnectionInfo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MiSqlConnectionInfo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MiSqlConnectionInfo: %+v", err)
	}

	decoded["type"] = "MiSqlConnectionInfo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MiSqlConnectionInfo: %+v", err)
	}

	return encoded, nil
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/model_mongodbconnectioninfo.go b/resource-manager/datamigration/2025-06-30/projectresource/model_mongodbconnectioninfo.go
new file mode 100644
index 00000000000..a3d142d43a1
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/model_mongodbconnectioninfo.go
@@ -0,0 +1,64 @@
package projectresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +var _ ConnectionInfo = MongoDbConnectionInfo{} + +type MongoDbConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + ConnectionString string `json:"connectionString"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + EnforceSSL *bool `json:"enforceSSL,omitempty"` + Port *int64 `json:"port,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MongoDbConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MongoDbConnectionInfo{} + +func (s MongoDbConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MongoDbConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbConnectionInfo: %+v", err) + } + + decoded["type"] = "mongoDbConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/projectresource/model_mysqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/projectresource/model_mysqlconnectioninfo.go new file mode 100644 index 
00000000000..ae9061af902
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/model_mysqlconnectioninfo.go
@@ -0,0 +1,59 @@
package projectresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// MySqlConnectionInfo is the ConnectionInfo variant for a MySQL endpoint;
// server name and port are required (non-pointer) fields.
var _ ConnectionInfo = MySqlConnectionInfo{}

type MySqlConnectionInfo struct {
	AdditionalSettings *string             `json:"additionalSettings,omitempty"`
	Authentication     *AuthenticationType `json:"authentication,omitempty"`
	DataSource         *string             `json:"dataSource,omitempty"`
	EncryptConnection  *bool               `json:"encryptConnection,omitempty"`
	Port               int64               `json:"port"`
	ServerName         string              `json:"serverName"`

	// Fields inherited from ConnectionInfo

	Password *string `json:"password,omitempty"`
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

// ConnectionInfo projects the shared base fields, satisfying the
// ConnectionInfo interface.
func (s MySqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
	return BaseConnectionInfoImpl{
		Password: s.Password,
		Type:     s.Type,
		UserName: s.UserName,
	}
}

var _ json.Marshaler = MySqlConnectionInfo{}

// MarshalJSON forces the "type" discriminator to "MySqlConnectionInfo".
func (s MySqlConnectionInfo) MarshalJSON() ([]byte, error) {
	// the wrapper alias drops the json.Marshaler method, avoiding infinite recursion
	type wrapper MySqlConnectionInfo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MySqlConnectionInfo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MySqlConnectionInfo: %+v", err)
	}

	decoded["type"] = "MySqlConnectionInfo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MySqlConnectionInfo: %+v", err)
	}

	return encoded, nil
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/model_oracleconnectioninfo.go b/resource-manager/datamigration/2025-06-30/projectresource/model_oracleconnectioninfo.go
new file mode 100644
index 00000000000..2fd28b531c7
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/model_oracleconnectioninfo.go
@@ -0,0 +1,58 @@
package projectresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// OracleConnectionInfo is the ConnectionInfo variant for an Oracle endpoint;
// only the data source is required.
var _ ConnectionInfo = OracleConnectionInfo{}

type OracleConnectionInfo struct {
	Authentication *AuthenticationType `json:"authentication,omitempty"`
	DataSource     string              `json:"dataSource"`
	Port           *int64              `json:"port,omitempty"`
	ServerName     *string             `json:"serverName,omitempty"`
	ServerVersion  *string             `json:"serverVersion,omitempty"`

	// Fields inherited from ConnectionInfo

	Password *string `json:"password,omitempty"`
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

// ConnectionInfo projects the shared base fields, satisfying the
// ConnectionInfo interface.
func (s OracleConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
	return BaseConnectionInfoImpl{
		Password: s.Password,
		Type:     s.Type,
		UserName: s.UserName,
	}
}

var _ json.Marshaler = OracleConnectionInfo{}

// MarshalJSON forces the "type" discriminator to "OracleConnectionInfo".
func (s OracleConnectionInfo) MarshalJSON() ([]byte, error) {
	// the wrapper alias drops the json.Marshaler method, avoiding infinite recursion
	type wrapper OracleConnectionInfo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling OracleConnectionInfo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling OracleConnectionInfo: %+v", err)
	}

	decoded["type"] = "OracleConnectionInfo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling OracleConnectionInfo: %+v", err)
	}

	return encoded, nil
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/model_postgresqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/projectresource/model_postgresqlconnectioninfo.go
new file mode
100644
index 00000000000..d801dea67a8
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/model_postgresqlconnectioninfo.go
@@ -0,0 +1,63 @@
package projectresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// PostgreSqlConnectionInfo is the ConnectionInfo variant for a PostgreSQL
// endpoint; server name and port are required (non-pointer) fields.
var _ ConnectionInfo = PostgreSqlConnectionInfo{}

type PostgreSqlConnectionInfo struct {
	AdditionalSettings     *string             `json:"additionalSettings,omitempty"`
	Authentication         *AuthenticationType `json:"authentication,omitempty"`
	DataSource             *string             `json:"dataSource,omitempty"`
	DatabaseName           *string             `json:"databaseName,omitempty"`
	EncryptConnection      *bool               `json:"encryptConnection,omitempty"`
	Port                   int64               `json:"port"`
	ServerBrandVersion     *string             `json:"serverBrandVersion,omitempty"`
	ServerName             string              `json:"serverName"`
	ServerVersion          *string             `json:"serverVersion,omitempty"`
	TrustServerCertificate *bool               `json:"trustServerCertificate,omitempty"`

	// Fields inherited from ConnectionInfo

	Password *string `json:"password,omitempty"`
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

// ConnectionInfo projects the shared base fields, satisfying the
// ConnectionInfo interface.
func (s PostgreSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
	return BaseConnectionInfoImpl{
		Password: s.Password,
		Type:     s.Type,
		UserName: s.UserName,
	}
}

var _ json.Marshaler = PostgreSqlConnectionInfo{}

// MarshalJSON forces the "type" discriminator to "PostgreSqlConnectionInfo".
func (s PostgreSqlConnectionInfo) MarshalJSON() ([]byte, error) {
	// the wrapper alias drops the json.Marshaler method, avoiding infinite recursion
	type wrapper PostgreSqlConnectionInfo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling PostgreSqlConnectionInfo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling PostgreSqlConnectionInfo: %+v", err)
	}

	decoded["type"] = "PostgreSqlConnectionInfo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling PostgreSqlConnectionInfo: %+v", err)
	}

	return encoded, nil
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/model_project.go b/resource-manager/datamigration/2025-06-30/projectresource/model_project.go
new file mode 100644
index 00000000000..e6bb76bf9e4
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/model_project.go
@@ -0,0 +1,19 @@
package projectresource

import (
	"github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// Project is the top-level ARM resource model; all envelope fields are
// optional pointers, with the migration settings nested under Properties.
type Project struct {
	Etag       *string                `json:"etag,omitempty"`
	Id         *string                `json:"id,omitempty"`
	Location   *string                `json:"location,omitempty"`
	Name       *string                `json:"name,omitempty"`
	Properties *ProjectProperties     `json:"properties,omitempty"`
	SystemData *systemdata.SystemData `json:"systemData,omitempty"`
	Tags       *map[string]string     `json:"tags,omitempty"`
	Type       *string                `json:"type,omitempty"`
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/model_projectproperties.go b/resource-manager/datamigration/2025-06-30/projectresource/model_projectproperties.go
new file mode 100644
index 00000000000..7b231edd1ee
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/model_projectproperties.go
@@ -0,0 +1,81 @@
package projectresource

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/hashicorp/go-azure-helpers/lang/dates"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// ProjectProperties holds a migration project's settings. The two
// ConnectionInfo fields are interface-typed and therefore need the custom
// UnmarshalJSON below to pick the concrete implementation.
type ProjectProperties struct {
	AzureAuthenticationInfo *AzureActiveDirectoryApp  `json:"azureAuthenticationInfo,omitempty"`
	CreationTime            *string                   `json:"creationTime,omitempty"`
	DatabasesInfo           *[]DatabaseInfo           `json:"databasesInfo,omitempty"`
	ProvisioningState       *ProjectProvisioningState `json:"provisioningState,omitempty"`
	SourceConnectionInfo    ConnectionInfo            `json:"sourceConnectionInfo"`
	SourcePlatform          ProjectSourcePlatform     `json:"sourcePlatform"`
	TargetConnectionInfo    ConnectionInfo            `json:"targetConnectionInfo"`
	TargetPlatform          ProjectTargetPlatform     `json:"targetPlatform"`
}

// GetCreationTimeAsTime parses CreationTime using the RFC 3339 layout; a nil
// CreationTime yields (nil, nil).
func (o *ProjectProperties) GetCreationTimeAsTime() (*time.Time, error) {
	if o.CreationTime == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.CreationTime, "2006-01-02T15:04:05Z07:00")
}

// SetCreationTimeAsTime stores input formatted with the RFC 3339 layout.
func (o *ProjectProperties) SetCreationTimeAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.CreationTime = &formatted
}

var _ json.Unmarshaler = &ProjectProperties{}

// UnmarshalJSON decodes the plain fields directly, then re-reads the raw JSON
// to resolve the two polymorphic ConnectionInfo fields via
// UnmarshalConnectionInfoImplementation.
func (s *ProjectProperties) UnmarshalJSON(bytes []byte) error {
	// first pass: everything except the interface-typed fields
	var decoded struct {
		AzureAuthenticationInfo *AzureActiveDirectoryApp  `json:"azureAuthenticationInfo,omitempty"`
		CreationTime            *string                   `json:"creationTime,omitempty"`
		DatabasesInfo           *[]DatabaseInfo           `json:"databasesInfo,omitempty"`
		ProvisioningState       *ProjectProvisioningState `json:"provisioningState,omitempty"`
		SourcePlatform          ProjectSourcePlatform     `json:"sourcePlatform"`
		TargetPlatform          ProjectTargetPlatform     `json:"targetPlatform"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.AzureAuthenticationInfo = decoded.AzureAuthenticationInfo
	s.CreationTime = decoded.CreationTime
	s.DatabasesInfo = decoded.DatabasesInfo
	s.ProvisioningState = decoded.ProvisioningState
	s.SourcePlatform = decoded.SourcePlatform
	s.TargetPlatform = decoded.TargetPlatform

	// second pass: keep the raw JSON for the discriminated-union fields
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ProjectProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["sourceConnectionInfo"]; ok {
		impl, err := UnmarshalConnectionInfoImplementation(v)
		if err != nil {
			return fmt.Errorf("unmarshaling field 'SourceConnectionInfo' for 'ProjectProperties': %+v", err)
		}
		s.SourceConnectionInfo = impl
	}

	if v, ok := temp["targetConnectionInfo"]; ok {
		impl, err := UnmarshalConnectionInfoImplementation(v)
		if err != nil {
			return fmt.Errorf("unmarshaling field 'TargetConnectionInfo' for 'ProjectProperties': %+v", err)
		}
		s.TargetConnectionInfo = impl
	}

	return nil
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/model_sqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/projectresource/model_sqlconnectioninfo.go
new file mode 100644
index 00000000000..df82fc45249
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/model_sqlconnectioninfo.go
@@ -0,0 +1,64 @@
package projectresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// SqlConnectionInfo is the ConnectionInfo variant for a SQL Server endpoint;
// only the data source is required.
var _ ConnectionInfo = SqlConnectionInfo{}

type SqlConnectionInfo struct {
	AdditionalSettings     *string             `json:"additionalSettings,omitempty"`
	Authentication         *AuthenticationType `json:"authentication,omitempty"`
	DataSource             string              `json:"dataSource"`
	EncryptConnection      *bool               `json:"encryptConnection,omitempty"`
	Platform               *SqlSourcePlatform  `json:"platform,omitempty"`
	Port                   *int64              `json:"port,omitempty"`
	ResourceId             *string             `json:"resourceId,omitempty"`
	ServerBrandVersion     *string             `json:"serverBrandVersion,omitempty"`
	ServerName             *string             `json:"serverName,omitempty"`
	ServerVersion          *string             `json:"serverVersion,omitempty"`
	TrustServerCertificate *bool               `json:"trustServerCertificate,omitempty"`

	// Fields inherited from ConnectionInfo

	Password *string `json:"password,omitempty"`
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

// ConnectionInfo projects the shared base fields, satisfying the
// ConnectionInfo interface.
func (s SqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
	return BaseConnectionInfoImpl{
		Password: s.Password,
		Type:     s.Type,
		UserName: s.UserName,
	}
}

var _ json.Marshaler = SqlConnectionInfo{}

// MarshalJSON forces the "type" discriminator to "SqlConnectionInfo".
func (s SqlConnectionInfo) MarshalJSON() ([]byte, error) {
	// the wrapper alias drops the json.Marshaler method, avoiding infinite recursion
	type wrapper SqlConnectionInfo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling SqlConnectionInfo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling SqlConnectionInfo: %+v", err)
	}

	decoded["type"] = "SqlConnectionInfo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling SqlConnectionInfo: %+v", err)
	}

	return encoded, nil
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/predicates.go b/resource-manager/datamigration/2025-06-30/projectresource/predicates.go
new file mode 100644
index 00000000000..1c5cd5921bd
--- /dev/null
+++
b/resource-manager/datamigration/2025-06-30/projectresource/predicates.go
@@ -0,0 +1,37 @@
package projectresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ProjectOperationPredicate filters Project list results; nil fields match
// anything, non-nil fields must equal the corresponding Project field.
type ProjectOperationPredicate struct {
	Etag     *string
	Id       *string
	Location *string
	Name     *string
	Type     *string
}

// Matches reports whether input satisfies every non-nil criterion. A set
// predicate field never matches a nil field on input.
func (p ProjectOperationPredicate) Matches(input Project) bool {

	if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) {
		return false
	}

	if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) {
		return false
	}

	if p.Location != nil && (input.Location == nil || *p.Location != *input.Location) {
		return false
	}

	if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) {
		return false
	}

	if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) {
		return false
	}

	return true
}
diff --git a/resource-manager/datamigration/2025-06-30/projectresource/version.go b/resource-manager/datamigration/2025-06-30/projectresource/version.go
new file mode 100644
index 00000000000..96c6b34fcc0
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/projectresource/version.go
@@ -0,0 +1,10 @@
package projectresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/projectresource/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/put/README.md b/resource-manager/datamigration/2025-06-30/put/README.md new file mode 100644 index 00000000000..f05ef405558 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/README.md @@ -0,0 +1,121 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/put` Documentation + +The `put` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/put" +``` + + +### Client Initialization + +```go +client := put.NewPUTClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `PUTClient.FilesCreateOrUpdate` + +```go +ctx := context.TODO() +id := put.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +payload := put.ProjectFile{ + // ... +} + + +read, err := client.FilesCreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `PUTClient.ProjectsCreateOrUpdate` + +```go +ctx := context.TODO() +id := put.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +payload := put.Project{ + // ... 
+} + + +read, err := client.ProjectsCreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `PUTClient.ServiceTasksCreateOrUpdate` + +```go +ctx := context.TODO() +id := put.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +payload := put.ProjectTask{ + // ... +} + + +read, err := client.ServiceTasksCreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `PUTClient.ServicesCreateOrUpdate` + +```go +ctx := context.TODO() +id := put.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +payload := put.DataMigrationService{ + // ... +} + + +if err := client.ServicesCreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `PUTClient.TasksCreateOrUpdate` + +```go +ctx := context.TODO() +id := put.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +payload := put.ProjectTask{ + // ... +} + + +read, err := client.TasksCreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/datamigration/2025-06-30/put/client.go b/resource-manager/datamigration/2025-06-30/put/client.go new file mode 100644 index 00000000000..54fb640a8e1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/client.go @@ -0,0 +1,26 @@ +package put + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// PUTClient wraps the resource-manager client for the put operations of the
// datamigration API.
type PUTClient struct {
	Client *resourcemanager.Client
}

// NewPUTClientWithBaseURI builds a PUTClient for the given environment API,
// pinned to defaultApiVersion.
func NewPUTClientWithBaseURI(sdkApi sdkEnv.Api) (*PUTClient, error) {
	client, err := resourcemanager.NewClient(sdkApi, "put", defaultApiVersion)
	if err != nil {
		return nil, fmt.Errorf("instantiating PUTClient: %+v", err)
	}

	return &PUTClient{
		Client: client,
	}, nil
}
diff --git a/resource-manager/datamigration/2025-06-30/put/constants.go b/resource-manager/datamigration/2025-06-30/put/constants.go
new file mode 100644
index 00000000000..b155678a691
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/constants.go
@@ -0,0 +1,2314 @@
package put

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// AuthenticationType enumerates the service-defined authentication modes.
type AuthenticationType string

const (
	AuthenticationTypeActiveDirectoryIntegrated AuthenticationType = "ActiveDirectoryIntegrated"
	AuthenticationTypeActiveDirectoryPassword   AuthenticationType = "ActiveDirectoryPassword"
	AuthenticationTypeNone                      AuthenticationType = "None"
	AuthenticationTypeSqlAuthentication         AuthenticationType = "SqlAuthentication"
	AuthenticationTypeWindowsAuthentication     AuthenticationType = "WindowsAuthentication"
)

func PossibleValuesForAuthenticationType() []string {
	return []string{
		string(AuthenticationTypeActiveDirectoryIntegrated),
		string(AuthenticationTypeActiveDirectoryPassword),
		string(AuthenticationTypeNone),
		string(AuthenticationTypeSqlAuthentication),
		string(AuthenticationTypeWindowsAuthentication),
	}
}

func (s *AuthenticationType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseAuthenticationType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseAuthenticationType maps input case-insensitively to a known value;
// unknown input is passed through unchanged (best-effort).
func parseAuthenticationType(input string) (*AuthenticationType, error) {
	vals := map[string]AuthenticationType{
		"activedirectoryintegrated": AuthenticationTypeActiveDirectoryIntegrated,
		"activedirectorypassword":   AuthenticationTypeActiveDirectoryPassword,
		"none":                      AuthenticationTypeNone,
		"sqlauthentication":         AuthenticationTypeSqlAuthentication,
		"windowsauthentication":     AuthenticationTypeWindowsAuthentication,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := AuthenticationType(input)
	return &out, nil
}

// BackupFileStatus enumerates the lifecycle states of a backup file.
type BackupFileStatus string

const (
	BackupFileStatusArrived   BackupFileStatus = "Arrived"
	BackupFileStatusCancelled BackupFileStatus = "Cancelled"
	BackupFileStatusQueued    BackupFileStatus = "Queued"
	BackupFileStatusRestored  BackupFileStatus = "Restored"
	BackupFileStatusRestoring BackupFileStatus = "Restoring"
	BackupFileStatusUploaded  BackupFileStatus = "Uploaded"
	BackupFileStatusUploading BackupFileStatus = "Uploading"
)

func PossibleValuesForBackupFileStatus() []string {
	return []string{
		string(BackupFileStatusArrived),
		string(BackupFileStatusCancelled),
		string(BackupFileStatusQueued),
		string(BackupFileStatusRestored),
		string(BackupFileStatusRestoring),
		string(BackupFileStatusUploaded),
		string(BackupFileStatusUploading),
	}
}

func (s *BackupFileStatus) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseBackupFileStatus(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseBackupFileStatus: case-insensitive match with best-effort fallback.
func parseBackupFileStatus(input string) (*BackupFileStatus, error) {
	vals := map[string]BackupFileStatus{
		"arrived":   BackupFileStatusArrived,
		"cancelled": BackupFileStatusCancelled,
		"queued":    BackupFileStatusQueued,
		"restored":  BackupFileStatusRestored,
		"restoring": BackupFileStatusRestoring,
		"uploaded":  BackupFileStatusUploaded,
		"uploading": BackupFileStatusUploading,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := BackupFileStatus(input)
	return &out, nil
}

// BackupMode selects between creating a fresh backup and using an existing one.
type BackupMode string

const (
	BackupModeCreateBackup   BackupMode = "CreateBackup"
	BackupModeExistingBackup BackupMode = "ExistingBackup"
)

func PossibleValuesForBackupMode() []string {
	return []string{
		string(BackupModeCreateBackup),
		string(BackupModeExistingBackup),
	}
}

func (s *BackupMode) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseBackupMode(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseBackupMode: case-insensitive match with best-effort fallback.
func parseBackupMode(input string) (*BackupMode, error) {
	vals := map[string]BackupMode{
		"createbackup":   BackupModeCreateBackup,
		"existingbackup": BackupModeExistingBackup,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := BackupMode(input)
	return &out, nil
}

// BackupType enumerates SQL Server backup kinds.
type BackupType string

const (
	BackupTypeDatabase             BackupType = "Database"
	BackupTypeDifferentialDatabase BackupType = "DifferentialDatabase"
	BackupTypeDifferentialFile     BackupType = "DifferentialFile"
	BackupTypeDifferentialPartial  BackupType = "DifferentialPartial"
	BackupTypeFile                 BackupType = "File"
	BackupTypePartial              BackupType = "Partial"
	BackupTypeTransactionLog       BackupType = "TransactionLog"
)

func PossibleValuesForBackupType() []string {
	return []string{
		string(BackupTypeDatabase),
		string(BackupTypeDifferentialDatabase),
		string(BackupTypeDifferentialFile),
		string(BackupTypeDifferentialPartial),
		string(BackupTypeFile),
		string(BackupTypePartial),
		string(BackupTypeTransactionLog),
	}
}

func (s *BackupType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseBackupType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseBackupType: case-insensitive match with best-effort fallback.
func parseBackupType(input string) (*BackupType, error) {
	vals := map[string]BackupType{
		"database":             BackupTypeDatabase,
		"differentialdatabase": BackupTypeDifferentialDatabase,
		"differentialfile":     BackupTypeDifferentialFile,
		"differentialpartial":  BackupTypeDifferentialPartial,
		"file":                 BackupTypeFile,
		"partial":              BackupTypePartial,
		"transactionlog":       BackupTypeTransactionLog,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := BackupType(input)
	return &out, nil
}

// CommandState enumerates the execution states of a service command.
type CommandState string

const (
	CommandStateAccepted  CommandState = "Accepted"
	CommandStateFailed    CommandState = "Failed"
	CommandStateRunning   CommandState = "Running"
	CommandStateSucceeded CommandState = "Succeeded"
	CommandStateUnknown   CommandState = "Unknown"
)

func PossibleValuesForCommandState() []string {
	return []string{
		string(CommandStateAccepted),
		string(CommandStateFailed),
		string(CommandStateRunning),
		string(CommandStateSucceeded),
		string(CommandStateUnknown),
	}
}

func (s *CommandState) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseCommandState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseCommandState: case-insensitive match with best-effort fallback.
func parseCommandState(input string) (*CommandState, error) {
	vals := map[string]CommandState{
		"accepted":  CommandStateAccepted,
		"failed":    CommandStateFailed,
		"running":   CommandStateRunning,
		"succeeded": CommandStateSucceeded,
		"unknown":   CommandStateUnknown,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := CommandState(input)
	return &out, nil
}

// CommandType enumerates the command discriminators; note the mixed casing is
// service-defined ("Point" in Go names stands for the literal '.').
type CommandType string

const (
	CommandTypeCancel                                              CommandType = "cancel"
	CommandTypeFinish                                              CommandType = "finish"
	CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete CommandType = "Migrate.SqlServer.AzureDbSqlMi.Complete"
	CommandTypeMigratePointSyncPointCompletePointDatabase          CommandType = "Migrate.Sync.Complete.Database"
	CommandTypeRestart                                             CommandType = "restart"
)

func PossibleValuesForCommandType() []string {
	return []string{
		string(CommandTypeCancel),
		string(CommandTypeFinish),
		string(CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete),
		string(CommandTypeMigratePointSyncPointCompletePointDatabase),
		string(CommandTypeRestart),
	}
}

func (s *CommandType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseCommandType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseCommandType: case-insensitive match with best-effort fallback.
func parseCommandType(input string) (*CommandType, error) {
	vals := map[string]CommandType{
		"cancel":  CommandTypeCancel,
		"finish":  CommandTypeFinish,
		"migrate.sqlserver.azuredbsqlmi.complete": CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete,
		"migrate.sync.complete.database":          CommandTypeMigratePointSyncPointCompletePointDatabase,
		"restart":                                 CommandTypeRestart,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := CommandType(input)
	return &out, nil
}

// DatabaseCompatLevel enumerates SQL Server compatibility levels (the Go names
// spell digits out, e.g. EightZero == "CompatLevel80").
type DatabaseCompatLevel string

const (
	DatabaseCompatLevelCompatLevelEightZero    DatabaseCompatLevel = "CompatLevel80"
	DatabaseCompatLevelCompatLevelNineZero     DatabaseCompatLevel = "CompatLevel90"
	DatabaseCompatLevelCompatLevelOneFourZero  DatabaseCompatLevel = "CompatLevel140"
	DatabaseCompatLevelCompatLevelOneHundred   DatabaseCompatLevel = "CompatLevel100"
	DatabaseCompatLevelCompatLevelOneOneZero   DatabaseCompatLevel = "CompatLevel110"
	DatabaseCompatLevelCompatLevelOneThreeZero DatabaseCompatLevel = "CompatLevel130"
	DatabaseCompatLevelCompatLevelOneTwoZero   DatabaseCompatLevel = "CompatLevel120"
)

func PossibleValuesForDatabaseCompatLevel() []string {
	return []string{
		string(DatabaseCompatLevelCompatLevelEightZero),
		string(DatabaseCompatLevelCompatLevelNineZero),
		string(DatabaseCompatLevelCompatLevelOneFourZero),
		string(DatabaseCompatLevelCompatLevelOneHundred),
		string(DatabaseCompatLevelCompatLevelOneOneZero),
		string(DatabaseCompatLevelCompatLevelOneThreeZero),
		string(DatabaseCompatLevelCompatLevelOneTwoZero),
	}
}

func (s *DatabaseCompatLevel) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseDatabaseCompatLevel(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseDatabaseCompatLevel: case-insensitive match with best-effort fallback.
func parseDatabaseCompatLevel(input string) (*DatabaseCompatLevel, error) {
	vals := map[string]DatabaseCompatLevel{
		"compatlevel80":  DatabaseCompatLevelCompatLevelEightZero,
		"compatlevel90":  DatabaseCompatLevelCompatLevelNineZero,
		"compatlevel140": DatabaseCompatLevelCompatLevelOneFourZero,
		"compatlevel100": DatabaseCompatLevelCompatLevelOneHundred,
		"compatlevel110": DatabaseCompatLevelCompatLevelOneOneZero,
		"compatlevel130": DatabaseCompatLevelCompatLevelOneThreeZero,
		"compatlevel120":
		DatabaseCompatLevelCompatLevelOneTwoZero,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := DatabaseCompatLevel(input)
	return &out, nil
}

// DatabaseFileType enumerates SQL Server database file kinds.
type DatabaseFileType string

const (
	DatabaseFileTypeFilestream   DatabaseFileType = "Filestream"
	DatabaseFileTypeFulltext     DatabaseFileType = "Fulltext"
	DatabaseFileTypeLog          DatabaseFileType = "Log"
	DatabaseFileTypeNotSupported DatabaseFileType = "NotSupported"
	DatabaseFileTypeRows         DatabaseFileType = "Rows"
)

func PossibleValuesForDatabaseFileType() []string {
	return []string{
		string(DatabaseFileTypeFilestream),
		string(DatabaseFileTypeFulltext),
		string(DatabaseFileTypeLog),
		string(DatabaseFileTypeNotSupported),
		string(DatabaseFileTypeRows),
	}
}

func (s *DatabaseFileType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseDatabaseFileType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseDatabaseFileType: case-insensitive match with best-effort fallback.
func parseDatabaseFileType(input string) (*DatabaseFileType, error) {
	vals := map[string]DatabaseFileType{
		"filestream":   DatabaseFileTypeFilestream,
		"fulltext":     DatabaseFileTypeFulltext,
		"log":          DatabaseFileTypeLog,
		"notsupported": DatabaseFileTypeNotSupported,
		"rows":         DatabaseFileTypeRows,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := DatabaseFileType(input)
	return &out, nil
}

// DatabaseMigrationStage enumerates the coarse phases of a database migration.
type DatabaseMigrationStage string

const (
	DatabaseMigrationStageBackup     DatabaseMigrationStage = "Backup"
	DatabaseMigrationStageCompleted  DatabaseMigrationStage = "Completed"
	DatabaseMigrationStageFileCopy   DatabaseMigrationStage = "FileCopy"
	DatabaseMigrationStageInitialize DatabaseMigrationStage = "Initialize"
	DatabaseMigrationStageNone       DatabaseMigrationStage = "None"
	DatabaseMigrationStageRestore    DatabaseMigrationStage = "Restore"
)

func PossibleValuesForDatabaseMigrationStage() []string {
	return []string{
		string(DatabaseMigrationStageBackup),
		string(DatabaseMigrationStageCompleted),
		string(DatabaseMigrationStageFileCopy),
		string(DatabaseMigrationStageInitialize),
		string(DatabaseMigrationStageNone),
		string(DatabaseMigrationStageRestore),
	}
}

func (s *DatabaseMigrationStage) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseDatabaseMigrationStage(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseDatabaseMigrationStage: case-insensitive match with best-effort fallback.
func parseDatabaseMigrationStage(input string) (*DatabaseMigrationStage, error) {
	vals := map[string]DatabaseMigrationStage{
		"backup":     DatabaseMigrationStageBackup,
		"completed":  DatabaseMigrationStageCompleted,
		"filecopy":   DatabaseMigrationStageFileCopy,
		"initialize": DatabaseMigrationStageInitialize,
		"none":       DatabaseMigrationStageNone,
		"restore":    DatabaseMigrationStageRestore,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := DatabaseMigrationStage(input)
	return &out, nil
}

// DatabaseMigrationState enumerates the fine-grained (SCREAMING_SNAKE on the
// wire) migration states.
type DatabaseMigrationState string

const (
	DatabaseMigrationStateCANCELLED             DatabaseMigrationState = "CANCELLED"
	DatabaseMigrationStateCOMPLETED             DatabaseMigrationState = "COMPLETED"
	DatabaseMigrationStateCUTOVERSTART          DatabaseMigrationState = "CUTOVER_START"
	DatabaseMigrationStateFAILED                DatabaseMigrationState = "FAILED"
	DatabaseMigrationStateFULLBACKUPUPLOADSTART DatabaseMigrationState = "FULL_BACKUP_UPLOAD_START"
	DatabaseMigrationStateINITIAL               DatabaseMigrationState = "INITIAL"
	DatabaseMigrationStateLOGSHIPPINGSTART      DatabaseMigrationState =
"LOG_SHIPPING_START" + DatabaseMigrationStatePOSTCUTOVERCOMPLETE DatabaseMigrationState = "POST_CUTOVER_COMPLETE" + DatabaseMigrationStateUNDEFINED DatabaseMigrationState = "UNDEFINED" + DatabaseMigrationStateUPLOADLOGFILESSTART DatabaseMigrationState = "UPLOAD_LOG_FILES_START" +) + +func PossibleValuesForDatabaseMigrationState() []string { + return []string{ + string(DatabaseMigrationStateCANCELLED), + string(DatabaseMigrationStateCOMPLETED), + string(DatabaseMigrationStateCUTOVERSTART), + string(DatabaseMigrationStateFAILED), + string(DatabaseMigrationStateFULLBACKUPUPLOADSTART), + string(DatabaseMigrationStateINITIAL), + string(DatabaseMigrationStateLOGSHIPPINGSTART), + string(DatabaseMigrationStatePOSTCUTOVERCOMPLETE), + string(DatabaseMigrationStateUNDEFINED), + string(DatabaseMigrationStateUPLOADLOGFILESSTART), + } +} + +func (s *DatabaseMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationState(input string) (*DatabaseMigrationState, error) { + vals := map[string]DatabaseMigrationState{ + "cancelled": DatabaseMigrationStateCANCELLED, + "completed": DatabaseMigrationStateCOMPLETED, + "cutover_start": DatabaseMigrationStateCUTOVERSTART, + "failed": DatabaseMigrationStateFAILED, + "full_backup_upload_start": DatabaseMigrationStateFULLBACKUPUPLOADSTART, + "initial": DatabaseMigrationStateINITIAL, + "log_shipping_start": DatabaseMigrationStateLOGSHIPPINGSTART, + "post_cutover_complete": DatabaseMigrationStatePOSTCUTOVERCOMPLETE, + "undefined": DatabaseMigrationStateUNDEFINED, + "upload_log_files_start": DatabaseMigrationStateUPLOADLOGFILESSTART, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an 
undefined value and best-effort it + out := DatabaseMigrationState(input) + return &out, nil +} + +type DatabaseState string + +const ( + DatabaseStateCopying DatabaseState = "Copying" + DatabaseStateEmergency DatabaseState = "Emergency" + DatabaseStateOffline DatabaseState = "Offline" + DatabaseStateOfflineSecondary DatabaseState = "OfflineSecondary" + DatabaseStateOnline DatabaseState = "Online" + DatabaseStateRecovering DatabaseState = "Recovering" + DatabaseStateRecoveryPending DatabaseState = "RecoveryPending" + DatabaseStateRestoring DatabaseState = "Restoring" + DatabaseStateSuspect DatabaseState = "Suspect" +) + +func PossibleValuesForDatabaseState() []string { + return []string{ + string(DatabaseStateCopying), + string(DatabaseStateEmergency), + string(DatabaseStateOffline), + string(DatabaseStateOfflineSecondary), + string(DatabaseStateOnline), + string(DatabaseStateRecovering), + string(DatabaseStateRecoveryPending), + string(DatabaseStateRestoring), + string(DatabaseStateSuspect), + } +} + +func (s *DatabaseState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseState(input string) (*DatabaseState, error) { + vals := map[string]DatabaseState{ + "copying": DatabaseStateCopying, + "emergency": DatabaseStateEmergency, + "offline": DatabaseStateOffline, + "offlinesecondary": DatabaseStateOfflineSecondary, + "online": DatabaseStateOnline, + "recovering": DatabaseStateRecovering, + "recoverypending": DatabaseStateRecoveryPending, + "restoring": DatabaseStateRestoring, + "suspect": DatabaseStateSuspect, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseState(input) + return 
&out, nil +} + +type LoginMigrationStage string + +const ( + LoginMigrationStageAssignRoleMembership LoginMigrationStage = "AssignRoleMembership" + LoginMigrationStageAssignRoleOwnership LoginMigrationStage = "AssignRoleOwnership" + LoginMigrationStageCompleted LoginMigrationStage = "Completed" + LoginMigrationStageEstablishObjectPermissions LoginMigrationStage = "EstablishObjectPermissions" + LoginMigrationStageEstablishServerPermissions LoginMigrationStage = "EstablishServerPermissions" + LoginMigrationStageEstablishUserMapping LoginMigrationStage = "EstablishUserMapping" + LoginMigrationStageInitialize LoginMigrationStage = "Initialize" + LoginMigrationStageLoginMigration LoginMigrationStage = "LoginMigration" + LoginMigrationStageNone LoginMigrationStage = "None" +) + +func PossibleValuesForLoginMigrationStage() []string { + return []string{ + string(LoginMigrationStageAssignRoleMembership), + string(LoginMigrationStageAssignRoleOwnership), + string(LoginMigrationStageCompleted), + string(LoginMigrationStageEstablishObjectPermissions), + string(LoginMigrationStageEstablishServerPermissions), + string(LoginMigrationStageEstablishUserMapping), + string(LoginMigrationStageInitialize), + string(LoginMigrationStageLoginMigration), + string(LoginMigrationStageNone), + } +} + +func (s *LoginMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLoginMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLoginMigrationStage(input string) (*LoginMigrationStage, error) { + vals := map[string]LoginMigrationStage{ + "assignrolemembership": LoginMigrationStageAssignRoleMembership, + "assignroleownership": LoginMigrationStageAssignRoleOwnership, + "completed": LoginMigrationStageCompleted, + "establishobjectpermissions": 
LoginMigrationStageEstablishObjectPermissions, + "establishserverpermissions": LoginMigrationStageEstablishServerPermissions, + "establishusermapping": LoginMigrationStageEstablishUserMapping, + "initialize": LoginMigrationStageInitialize, + "loginmigration": LoginMigrationStageLoginMigration, + "none": LoginMigrationStageNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LoginMigrationStage(input) + return &out, nil +} + +type LoginType string + +const ( + LoginTypeAsymmetricKey LoginType = "AsymmetricKey" + LoginTypeCertificate LoginType = "Certificate" + LoginTypeExternalGroup LoginType = "ExternalGroup" + LoginTypeExternalUser LoginType = "ExternalUser" + LoginTypeSqlLogin LoginType = "SqlLogin" + LoginTypeWindowsGroup LoginType = "WindowsGroup" + LoginTypeWindowsUser LoginType = "WindowsUser" +) + +func PossibleValuesForLoginType() []string { + return []string{ + string(LoginTypeAsymmetricKey), + string(LoginTypeCertificate), + string(LoginTypeExternalGroup), + string(LoginTypeExternalUser), + string(LoginTypeSqlLogin), + string(LoginTypeWindowsGroup), + string(LoginTypeWindowsUser), + } +} + +func (s *LoginType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLoginType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLoginType(input string) (*LoginType, error) { + vals := map[string]LoginType{ + "asymmetrickey": LoginTypeAsymmetricKey, + "certificate": LoginTypeCertificate, + "externalgroup": LoginTypeExternalGroup, + "externaluser": LoginTypeExternalUser, + "sqllogin": LoginTypeSqlLogin, + "windowsgroup": LoginTypeWindowsGroup, + "windowsuser": LoginTypeWindowsUser, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // 
otherwise presume it's an undefined value and best-effort it + out := LoginType(input) + return &out, nil +} + +type MigrationState string + +const ( + MigrationStateCompleted MigrationState = "Completed" + MigrationStateFailed MigrationState = "Failed" + MigrationStateInProgress MigrationState = "InProgress" + MigrationStateNone MigrationState = "None" + MigrationStateSkipped MigrationState = "Skipped" + MigrationStateStopped MigrationState = "Stopped" + MigrationStateWarning MigrationState = "Warning" +) + +func PossibleValuesForMigrationState() []string { + return []string{ + string(MigrationStateCompleted), + string(MigrationStateFailed), + string(MigrationStateInProgress), + string(MigrationStateNone), + string(MigrationStateSkipped), + string(MigrationStateStopped), + string(MigrationStateWarning), + } +} + +func (s *MigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMigrationState(input string) (*MigrationState, error) { + vals := map[string]MigrationState{ + "completed": MigrationStateCompleted, + "failed": MigrationStateFailed, + "inprogress": MigrationStateInProgress, + "none": MigrationStateNone, + "skipped": MigrationStateSkipped, + "stopped": MigrationStateStopped, + "warning": MigrationStateWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MigrationState(input) + return &out, nil +} + +type MigrationStatus string + +const ( + MigrationStatusCompleted MigrationStatus = "Completed" + MigrationStatusCompletedWithWarnings MigrationStatus = "CompletedWithWarnings" + MigrationStatusConfigured MigrationStatus = "Configured" + MigrationStatusConnecting MigrationStatus 
= "Connecting" + MigrationStatusDefault MigrationStatus = "Default" + MigrationStatusError MigrationStatus = "Error" + MigrationStatusRunning MigrationStatus = "Running" + MigrationStatusSelectLogins MigrationStatus = "SelectLogins" + MigrationStatusSourceAndTargetSelected MigrationStatus = "SourceAndTargetSelected" + MigrationStatusStopped MigrationStatus = "Stopped" +) + +func PossibleValuesForMigrationStatus() []string { + return []string{ + string(MigrationStatusCompleted), + string(MigrationStatusCompletedWithWarnings), + string(MigrationStatusConfigured), + string(MigrationStatusConnecting), + string(MigrationStatusDefault), + string(MigrationStatusError), + string(MigrationStatusRunning), + string(MigrationStatusSelectLogins), + string(MigrationStatusSourceAndTargetSelected), + string(MigrationStatusStopped), + } +} + +func (s *MigrationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMigrationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMigrationStatus(input string) (*MigrationStatus, error) { + vals := map[string]MigrationStatus{ + "completed": MigrationStatusCompleted, + "completedwithwarnings": MigrationStatusCompletedWithWarnings, + "configured": MigrationStatusConfigured, + "connecting": MigrationStatusConnecting, + "default": MigrationStatusDefault, + "error": MigrationStatusError, + "running": MigrationStatusRunning, + "selectlogins": MigrationStatusSelectLogins, + "sourceandtargetselected": MigrationStatusSourceAndTargetSelected, + "stopped": MigrationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MigrationStatus(input) + return &out, nil +} + +type MongoDbClusterType string + +const ( + 
MongoDbClusterTypeBlobContainer MongoDbClusterType = "BlobContainer" + MongoDbClusterTypeCosmosDb MongoDbClusterType = "CosmosDb" + MongoDbClusterTypeMongoDb MongoDbClusterType = "MongoDb" +) + +func PossibleValuesForMongoDbClusterType() []string { + return []string{ + string(MongoDbClusterTypeBlobContainer), + string(MongoDbClusterTypeCosmosDb), + string(MongoDbClusterTypeMongoDb), + } +} + +func (s *MongoDbClusterType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbClusterType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbClusterType(input string) (*MongoDbClusterType, error) { + vals := map[string]MongoDbClusterType{ + "blobcontainer": MongoDbClusterTypeBlobContainer, + "cosmosdb": MongoDbClusterTypeCosmosDb, + "mongodb": MongoDbClusterTypeMongoDb, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbClusterType(input) + return &out, nil +} + +type MongoDbErrorType string + +const ( + MongoDbErrorTypeError MongoDbErrorType = "Error" + MongoDbErrorTypeValidationError MongoDbErrorType = "ValidationError" + MongoDbErrorTypeWarning MongoDbErrorType = "Warning" +) + +func PossibleValuesForMongoDbErrorType() []string { + return []string{ + string(MongoDbErrorTypeError), + string(MongoDbErrorTypeValidationError), + string(MongoDbErrorTypeWarning), + } +} + +func (s *MongoDbErrorType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbErrorType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbErrorType(input string) 
(*MongoDbErrorType, error) { + vals := map[string]MongoDbErrorType{ + "error": MongoDbErrorTypeError, + "validationerror": MongoDbErrorTypeValidationError, + "warning": MongoDbErrorTypeWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbErrorType(input) + return &out, nil +} + +type MongoDbMigrationState string + +const ( + MongoDbMigrationStateCanceled MongoDbMigrationState = "Canceled" + MongoDbMigrationStateComplete MongoDbMigrationState = "Complete" + MongoDbMigrationStateCopying MongoDbMigrationState = "Copying" + MongoDbMigrationStateFailed MongoDbMigrationState = "Failed" + MongoDbMigrationStateFinalizing MongoDbMigrationState = "Finalizing" + MongoDbMigrationStateInitialReplay MongoDbMigrationState = "InitialReplay" + MongoDbMigrationStateInitializing MongoDbMigrationState = "Initializing" + MongoDbMigrationStateNotStarted MongoDbMigrationState = "NotStarted" + MongoDbMigrationStateReplaying MongoDbMigrationState = "Replaying" + MongoDbMigrationStateRestarting MongoDbMigrationState = "Restarting" + MongoDbMigrationStateValidatingInput MongoDbMigrationState = "ValidatingInput" +) + +func PossibleValuesForMongoDbMigrationState() []string { + return []string{ + string(MongoDbMigrationStateCanceled), + string(MongoDbMigrationStateComplete), + string(MongoDbMigrationStateCopying), + string(MongoDbMigrationStateFailed), + string(MongoDbMigrationStateFinalizing), + string(MongoDbMigrationStateInitialReplay), + string(MongoDbMigrationStateInitializing), + string(MongoDbMigrationStateNotStarted), + string(MongoDbMigrationStateReplaying), + string(MongoDbMigrationStateRestarting), + string(MongoDbMigrationStateValidatingInput), + } +} + +func (s *MongoDbMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := 
parseMongoDbMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbMigrationState(input string) (*MongoDbMigrationState, error) { + vals := map[string]MongoDbMigrationState{ + "canceled": MongoDbMigrationStateCanceled, + "complete": MongoDbMigrationStateComplete, + "copying": MongoDbMigrationStateCopying, + "failed": MongoDbMigrationStateFailed, + "finalizing": MongoDbMigrationStateFinalizing, + "initialreplay": MongoDbMigrationStateInitialReplay, + "initializing": MongoDbMigrationStateInitializing, + "notstarted": MongoDbMigrationStateNotStarted, + "replaying": MongoDbMigrationStateReplaying, + "restarting": MongoDbMigrationStateRestarting, + "validatinginput": MongoDbMigrationStateValidatingInput, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbMigrationState(input) + return &out, nil +} + +type MongoDbReplication string + +const ( + MongoDbReplicationContinuous MongoDbReplication = "Continuous" + MongoDbReplicationDisabled MongoDbReplication = "Disabled" + MongoDbReplicationOneTime MongoDbReplication = "OneTime" +) + +func PossibleValuesForMongoDbReplication() []string { + return []string{ + string(MongoDbReplicationContinuous), + string(MongoDbReplicationDisabled), + string(MongoDbReplicationOneTime), + } +} + +func (s *MongoDbReplication) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbReplication(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbReplication(input string) (*MongoDbReplication, error) { + vals := map[string]MongoDbReplication{ + "continuous": MongoDbReplicationContinuous, + "disabled": MongoDbReplicationDisabled, + "onetime": 
MongoDbReplicationOneTime, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbReplication(input) + return &out, nil +} + +type MongoDbShardKeyOrder string + +const ( + MongoDbShardKeyOrderForward MongoDbShardKeyOrder = "Forward" + MongoDbShardKeyOrderHashed MongoDbShardKeyOrder = "Hashed" + MongoDbShardKeyOrderReverse MongoDbShardKeyOrder = "Reverse" +) + +func PossibleValuesForMongoDbShardKeyOrder() []string { + return []string{ + string(MongoDbShardKeyOrderForward), + string(MongoDbShardKeyOrderHashed), + string(MongoDbShardKeyOrderReverse), + } +} + +func (s *MongoDbShardKeyOrder) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbShardKeyOrder(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbShardKeyOrder(input string) (*MongoDbShardKeyOrder, error) { + vals := map[string]MongoDbShardKeyOrder{ + "forward": MongoDbShardKeyOrderForward, + "hashed": MongoDbShardKeyOrderHashed, + "reverse": MongoDbShardKeyOrderReverse, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbShardKeyOrder(input) + return &out, nil +} + +type MySqlTargetPlatformType string + +const ( + MySqlTargetPlatformTypeAzureDbForMySQL MySqlTargetPlatformType = "AzureDbForMySQL" + MySqlTargetPlatformTypeSqlServer MySqlTargetPlatformType = "SqlServer" +) + +func PossibleValuesForMySqlTargetPlatformType() []string { + return []string{ + string(MySqlTargetPlatformTypeAzureDbForMySQL), + string(MySqlTargetPlatformTypeSqlServer), + } +} + +func (s *MySqlTargetPlatformType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err 
!= nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMySqlTargetPlatformType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMySqlTargetPlatformType(input string) (*MySqlTargetPlatformType, error) { + vals := map[string]MySqlTargetPlatformType{ + "azuredbformysql": MySqlTargetPlatformTypeAzureDbForMySQL, + "sqlserver": MySqlTargetPlatformTypeSqlServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MySqlTargetPlatformType(input) + return &out, nil +} + +type ObjectType string + +const ( + ObjectTypeFunction ObjectType = "Function" + ObjectTypeStoredProcedures ObjectType = "StoredProcedures" + ObjectTypeTable ObjectType = "Table" + ObjectTypeUser ObjectType = "User" + ObjectTypeView ObjectType = "View" +) + +func PossibleValuesForObjectType() []string { + return []string{ + string(ObjectTypeFunction), + string(ObjectTypeStoredProcedures), + string(ObjectTypeTable), + string(ObjectTypeUser), + string(ObjectTypeView), + } +} + +func (s *ObjectType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseObjectType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseObjectType(input string) (*ObjectType, error) { + vals := map[string]ObjectType{ + "function": ObjectTypeFunction, + "storedprocedures": ObjectTypeStoredProcedures, + "table": ObjectTypeTable, + "user": ObjectTypeUser, + "view": ObjectTypeView, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ObjectType(input) + return &out, nil +} + +type ProjectProvisioningState string + +const ( + 
ProjectProvisioningStateDeleting ProjectProvisioningState = "Deleting" + ProjectProvisioningStateSucceeded ProjectProvisioningState = "Succeeded" +) + +func PossibleValuesForProjectProvisioningState() []string { + return []string{ + string(ProjectProvisioningStateDeleting), + string(ProjectProvisioningStateSucceeded), + } +} + +func (s *ProjectProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProjectProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProjectProvisioningState(input string) (*ProjectProvisioningState, error) { + vals := map[string]ProjectProvisioningState{ + "deleting": ProjectProvisioningStateDeleting, + "succeeded": ProjectProvisioningStateSucceeded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProjectProvisioningState(input) + return &out, nil +} + +type ProjectSourcePlatform string + +const ( + ProjectSourcePlatformMongoDb ProjectSourcePlatform = "MongoDb" + ProjectSourcePlatformMySQL ProjectSourcePlatform = "MySQL" + ProjectSourcePlatformPostgreSql ProjectSourcePlatform = "PostgreSql" + ProjectSourcePlatformSQL ProjectSourcePlatform = "SQL" + ProjectSourcePlatformUnknown ProjectSourcePlatform = "Unknown" +) + +func PossibleValuesForProjectSourcePlatform() []string { + return []string{ + string(ProjectSourcePlatformMongoDb), + string(ProjectSourcePlatformMySQL), + string(ProjectSourcePlatformPostgreSql), + string(ProjectSourcePlatformSQL), + string(ProjectSourcePlatformUnknown), + } +} + +func (s *ProjectSourcePlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := 
parseProjectSourcePlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProjectSourcePlatform(input string) (*ProjectSourcePlatform, error) { + vals := map[string]ProjectSourcePlatform{ + "mongodb": ProjectSourcePlatformMongoDb, + "mysql": ProjectSourcePlatformMySQL, + "postgresql": ProjectSourcePlatformPostgreSql, + "sql": ProjectSourcePlatformSQL, + "unknown": ProjectSourcePlatformUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProjectSourcePlatform(input) + return &out, nil +} + +type ProjectTargetPlatform string + +const ( + ProjectTargetPlatformAzureDbForMySql ProjectTargetPlatform = "AzureDbForMySql" + ProjectTargetPlatformAzureDbForPostgreSql ProjectTargetPlatform = "AzureDbForPostgreSql" + ProjectTargetPlatformMongoDb ProjectTargetPlatform = "MongoDb" + ProjectTargetPlatformSQLDB ProjectTargetPlatform = "SQLDB" + ProjectTargetPlatformSQLMI ProjectTargetPlatform = "SQLMI" + ProjectTargetPlatformUnknown ProjectTargetPlatform = "Unknown" +) + +func PossibleValuesForProjectTargetPlatform() []string { + return []string{ + string(ProjectTargetPlatformAzureDbForMySql), + string(ProjectTargetPlatformAzureDbForPostgreSql), + string(ProjectTargetPlatformMongoDb), + string(ProjectTargetPlatformSQLDB), + string(ProjectTargetPlatformSQLMI), + string(ProjectTargetPlatformUnknown), + } +} + +func (s *ProjectTargetPlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProjectTargetPlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProjectTargetPlatform(input string) (*ProjectTargetPlatform, error) { + vals := map[string]ProjectTargetPlatform{ + "azuredbformysql": 
ProjectTargetPlatformAzureDbForMySql, + "azuredbforpostgresql": ProjectTargetPlatformAzureDbForPostgreSql, + "mongodb": ProjectTargetPlatformMongoDb, + "sqldb": ProjectTargetPlatformSQLDB, + "sqlmi": ProjectTargetPlatformSQLMI, + "unknown": ProjectTargetPlatformUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProjectTargetPlatform(input) + return &out, nil +} + +type ReplicateMigrationState string + +const ( + ReplicateMigrationStateACTIONREQUIRED ReplicateMigrationState = "ACTION_REQUIRED" + ReplicateMigrationStateCOMPLETE ReplicateMigrationState = "COMPLETE" + ReplicateMigrationStateFAILED ReplicateMigrationState = "FAILED" + ReplicateMigrationStatePENDING ReplicateMigrationState = "PENDING" + ReplicateMigrationStateUNDEFINED ReplicateMigrationState = "UNDEFINED" + ReplicateMigrationStateVALIDATING ReplicateMigrationState = "VALIDATING" +) + +func PossibleValuesForReplicateMigrationState() []string { + return []string{ + string(ReplicateMigrationStateACTIONREQUIRED), + string(ReplicateMigrationStateCOMPLETE), + string(ReplicateMigrationStateFAILED), + string(ReplicateMigrationStatePENDING), + string(ReplicateMigrationStateUNDEFINED), + string(ReplicateMigrationStateVALIDATING), + } +} + +func (s *ReplicateMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseReplicateMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicateMigrationState(input string) (*ReplicateMigrationState, error) { + vals := map[string]ReplicateMigrationState{ + "action_required": ReplicateMigrationStateACTIONREQUIRED, + "complete": ReplicateMigrationStateCOMPLETE, + "failed": ReplicateMigrationStateFAILED, + "pending": 
ReplicateMigrationStatePENDING, + "undefined": ReplicateMigrationStateUNDEFINED, + "validating": ReplicateMigrationStateVALIDATING, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicateMigrationState(input) + return &out, nil +} + +type ResultType string + +const ( + ResultTypeCollection ResultType = "Collection" + ResultTypeDatabase ResultType = "Database" + ResultTypeMigration ResultType = "Migration" +) + +func PossibleValuesForResultType() []string { + return []string{ + string(ResultTypeCollection), + string(ResultTypeDatabase), + string(ResultTypeMigration), + } +} + +func (s *ResultType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResultType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResultType(input string) (*ResultType, error) { + vals := map[string]ResultType{ + "collection": ResultTypeCollection, + "database": ResultTypeDatabase, + "migration": ResultTypeMigration, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResultType(input) + return &out, nil +} + +type ScenarioSource string + +const ( + ScenarioSourceAccess ScenarioSource = "Access" + ScenarioSourceDBTwo ScenarioSource = "DB2" + ScenarioSourceMongoDB ScenarioSource = "MongoDB" + ScenarioSourceMySQL ScenarioSource = "MySQL" + ScenarioSourceMySQLRDS ScenarioSource = "MySQLRDS" + ScenarioSourceOracle ScenarioSource = "Oracle" + ScenarioSourcePostgreSQL ScenarioSource = "PostgreSQL" + ScenarioSourcePostgreSQLRDS ScenarioSource = "PostgreSQLRDS" + ScenarioSourceSQL ScenarioSource = "SQL" + ScenarioSourceSQLRDS ScenarioSource = "SQLRDS" + ScenarioSourceSybase ScenarioSource = 
"Sybase" +) + +func PossibleValuesForScenarioSource() []string { + return []string{ + string(ScenarioSourceAccess), + string(ScenarioSourceDBTwo), + string(ScenarioSourceMongoDB), + string(ScenarioSourceMySQL), + string(ScenarioSourceMySQLRDS), + string(ScenarioSourceOracle), + string(ScenarioSourcePostgreSQL), + string(ScenarioSourcePostgreSQLRDS), + string(ScenarioSourceSQL), + string(ScenarioSourceSQLRDS), + string(ScenarioSourceSybase), + } +} + +func (s *ScenarioSource) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioSource(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioSource(input string) (*ScenarioSource, error) { + vals := map[string]ScenarioSource{ + "access": ScenarioSourceAccess, + "db2": ScenarioSourceDBTwo, + "mongodb": ScenarioSourceMongoDB, + "mysql": ScenarioSourceMySQL, + "mysqlrds": ScenarioSourceMySQLRDS, + "oracle": ScenarioSourceOracle, + "postgresql": ScenarioSourcePostgreSQL, + "postgresqlrds": ScenarioSourcePostgreSQLRDS, + "sql": ScenarioSourceSQL, + "sqlrds": ScenarioSourceSQLRDS, + "sybase": ScenarioSourceSybase, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioSource(input) + return &out, nil +} + +type ScenarioTarget string + +const ( + ScenarioTargetAzureDBForMySql ScenarioTarget = "AzureDBForMySql" + ScenarioTargetAzureDBForPostgresSQL ScenarioTarget = "AzureDBForPostgresSQL" + ScenarioTargetMongoDB ScenarioTarget = "MongoDB" + ScenarioTargetSQLDB ScenarioTarget = "SQLDB" + ScenarioTargetSQLDW ScenarioTarget = "SQLDW" + ScenarioTargetSQLMI ScenarioTarget = "SQLMI" + ScenarioTargetSQLServer ScenarioTarget = "SQLServer" +) + +func PossibleValuesForScenarioTarget() []string { + return []string{ + 
string(ScenarioTargetAzureDBForMySql), + string(ScenarioTargetAzureDBForPostgresSQL), + string(ScenarioTargetMongoDB), + string(ScenarioTargetSQLDB), + string(ScenarioTargetSQLDW), + string(ScenarioTargetSQLMI), + string(ScenarioTargetSQLServer), + } +} + +func (s *ScenarioTarget) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioTarget(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioTarget(input string) (*ScenarioTarget, error) { + vals := map[string]ScenarioTarget{ + "azuredbformysql": ScenarioTargetAzureDBForMySql, + "azuredbforpostgressql": ScenarioTargetAzureDBForPostgresSQL, + "mongodb": ScenarioTargetMongoDB, + "sqldb": ScenarioTargetSQLDB, + "sqldw": ScenarioTargetSQLDW, + "sqlmi": ScenarioTargetSQLMI, + "sqlserver": ScenarioTargetSQLServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioTarget(input) + return &out, nil +} + +type ServerLevelPermissionsGroup string + +const ( + ServerLevelPermissionsGroupDefault ServerLevelPermissionsGroup = "Default" + ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL ServerLevelPermissionsGroup = "MigrationFromMySQLToAzureDBForMySQL" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureDB" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureMI" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureVM" +) + +func PossibleValuesForServerLevelPermissionsGroup() []string { + return []string{ + string(ServerLevelPermissionsGroupDefault), + 
string(ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM), + } +} + +func (s *ServerLevelPermissionsGroup) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServerLevelPermissionsGroup(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServerLevelPermissionsGroup(input string) (*ServerLevelPermissionsGroup, error) { + vals := map[string]ServerLevelPermissionsGroup{ + "default": ServerLevelPermissionsGroupDefault, + "migrationfrommysqltoazuredbformysql": ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL, + "migrationfromsqlservertoazuredb": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB, + "migrationfromsqlservertoazuremi": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI, + "migrationfromsqlservertoazurevm": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServerLevelPermissionsGroup(input) + return &out, nil +} + +type ServiceProvisioningState string + +const ( + ServiceProvisioningStateAccepted ServiceProvisioningState = "Accepted" + ServiceProvisioningStateDeleting ServiceProvisioningState = "Deleting" + ServiceProvisioningStateDeploying ServiceProvisioningState = "Deploying" + ServiceProvisioningStateFailed ServiceProvisioningState = "Failed" + ServiceProvisioningStateFailedToStart ServiceProvisioningState = "FailedToStart" + ServiceProvisioningStateFailedToStop ServiceProvisioningState = "FailedToStop" + ServiceProvisioningStateStarting 
ServiceProvisioningState = "Starting" + ServiceProvisioningStateStopped ServiceProvisioningState = "Stopped" + ServiceProvisioningStateStopping ServiceProvisioningState = "Stopping" + ServiceProvisioningStateSucceeded ServiceProvisioningState = "Succeeded" +) + +func PossibleValuesForServiceProvisioningState() []string { + return []string{ + string(ServiceProvisioningStateAccepted), + string(ServiceProvisioningStateDeleting), + string(ServiceProvisioningStateDeploying), + string(ServiceProvisioningStateFailed), + string(ServiceProvisioningStateFailedToStart), + string(ServiceProvisioningStateFailedToStop), + string(ServiceProvisioningStateStarting), + string(ServiceProvisioningStateStopped), + string(ServiceProvisioningStateStopping), + string(ServiceProvisioningStateSucceeded), + } +} + +func (s *ServiceProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServiceProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServiceProvisioningState(input string) (*ServiceProvisioningState, error) { + vals := map[string]ServiceProvisioningState{ + "accepted": ServiceProvisioningStateAccepted, + "deleting": ServiceProvisioningStateDeleting, + "deploying": ServiceProvisioningStateDeploying, + "failed": ServiceProvisioningStateFailed, + "failedtostart": ServiceProvisioningStateFailedToStart, + "failedtostop": ServiceProvisioningStateFailedToStop, + "starting": ServiceProvisioningStateStarting, + "stopped": ServiceProvisioningStateStopped, + "stopping": ServiceProvisioningStateStopping, + "succeeded": ServiceProvisioningStateSucceeded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServiceProvisioningState(input) + return &out, nil +} + 
+type Severity string + +const ( + SeverityError Severity = "Error" + SeverityMessage Severity = "Message" + SeverityWarning Severity = "Warning" +) + +func PossibleValuesForSeverity() []string { + return []string{ + string(SeverityError), + string(SeverityMessage), + string(SeverityWarning), + } +} + +func (s *Severity) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSeverity(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSeverity(input string) (*Severity, error) { + vals := map[string]Severity{ + "error": SeverityError, + "message": SeverityMessage, + "warning": SeverityWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Severity(input) + return &out, nil +} + +type SqlSourcePlatform string + +const ( + SqlSourcePlatformSqlOnPrem SqlSourcePlatform = "SqlOnPrem" +) + +func PossibleValuesForSqlSourcePlatform() []string { + return []string{ + string(SqlSourcePlatformSqlOnPrem), + } +} + +func (s *SqlSourcePlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSqlSourcePlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSqlSourcePlatform(input string) (*SqlSourcePlatform, error) { + vals := map[string]SqlSourcePlatform{ + "sqlonprem": SqlSourcePlatformSqlOnPrem, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SqlSourcePlatform(input) + return &out, nil +} + +type SsisMigrationOverwriteOption string + +const ( + 
SsisMigrationOverwriteOptionIgnore SsisMigrationOverwriteOption = "Ignore" + SsisMigrationOverwriteOptionOverwrite SsisMigrationOverwriteOption = "Overwrite" +) + +func PossibleValuesForSsisMigrationOverwriteOption() []string { + return []string{ + string(SsisMigrationOverwriteOptionIgnore), + string(SsisMigrationOverwriteOptionOverwrite), + } +} + +func (s *SsisMigrationOverwriteOption) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisMigrationOverwriteOption(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisMigrationOverwriteOption(input string) (*SsisMigrationOverwriteOption, error) { + vals := map[string]SsisMigrationOverwriteOption{ + "ignore": SsisMigrationOverwriteOptionIgnore, + "overwrite": SsisMigrationOverwriteOptionOverwrite, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisMigrationOverwriteOption(input) + return &out, nil +} + +type SsisMigrationStage string + +const ( + SsisMigrationStageCompleted SsisMigrationStage = "Completed" + SsisMigrationStageInProgress SsisMigrationStage = "InProgress" + SsisMigrationStageInitialize SsisMigrationStage = "Initialize" + SsisMigrationStageNone SsisMigrationStage = "None" +) + +func PossibleValuesForSsisMigrationStage() []string { + return []string{ + string(SsisMigrationStageCompleted), + string(SsisMigrationStageInProgress), + string(SsisMigrationStageInitialize), + string(SsisMigrationStageNone), + } +} + +func (s *SsisMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: 
%+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisMigrationStage(input string) (*SsisMigrationStage, error) { + vals := map[string]SsisMigrationStage{ + "completed": SsisMigrationStageCompleted, + "inprogress": SsisMigrationStageInProgress, + "initialize": SsisMigrationStageInitialize, + "none": SsisMigrationStageNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisMigrationStage(input) + return &out, nil +} + +type SsisStoreType string + +const ( + SsisStoreTypeSsisCatalog SsisStoreType = "SsisCatalog" +) + +func PossibleValuesForSsisStoreType() []string { + return []string{ + string(SsisStoreTypeSsisCatalog), + } +} + +func (s *SsisStoreType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisStoreType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisStoreType(input string) (*SsisStoreType, error) { + vals := map[string]SsisStoreType{ + "ssiscatalog": SsisStoreTypeSsisCatalog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisStoreType(input) + return &out, nil +} + +type SyncDatabaseMigrationReportingState string + +const ( + SyncDatabaseMigrationReportingStateBACKUPCOMPLETED SyncDatabaseMigrationReportingState = "BACKUP_COMPLETED" + SyncDatabaseMigrationReportingStateBACKUPINPROGRESS SyncDatabaseMigrationReportingState = "BACKUP_IN_PROGRESS" + SyncDatabaseMigrationReportingStateCANCELLED SyncDatabaseMigrationReportingState = "CANCELLED" + SyncDatabaseMigrationReportingStateCANCELLING SyncDatabaseMigrationReportingState = "CANCELLING" + SyncDatabaseMigrationReportingStateCOMPLETE SyncDatabaseMigrationReportingState = 
"COMPLETE" + SyncDatabaseMigrationReportingStateCOMPLETING SyncDatabaseMigrationReportingState = "COMPLETING" + SyncDatabaseMigrationReportingStateCONFIGURING SyncDatabaseMigrationReportingState = "CONFIGURING" + SyncDatabaseMigrationReportingStateFAILED SyncDatabaseMigrationReportingState = "FAILED" + SyncDatabaseMigrationReportingStateINITIALIAZING SyncDatabaseMigrationReportingState = "INITIALIAZING" + SyncDatabaseMigrationReportingStateREADYTOCOMPLETE SyncDatabaseMigrationReportingState = "READY_TO_COMPLETE" + SyncDatabaseMigrationReportingStateRESTORECOMPLETED SyncDatabaseMigrationReportingState = "RESTORE_COMPLETED" + SyncDatabaseMigrationReportingStateRESTOREINPROGRESS SyncDatabaseMigrationReportingState = "RESTORE_IN_PROGRESS" + SyncDatabaseMigrationReportingStateRUNNING SyncDatabaseMigrationReportingState = "RUNNING" + SyncDatabaseMigrationReportingStateSTARTING SyncDatabaseMigrationReportingState = "STARTING" + SyncDatabaseMigrationReportingStateUNDEFINED SyncDatabaseMigrationReportingState = "UNDEFINED" + SyncDatabaseMigrationReportingStateVALIDATING SyncDatabaseMigrationReportingState = "VALIDATING" + SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE SyncDatabaseMigrationReportingState = "VALIDATION_COMPLETE" + SyncDatabaseMigrationReportingStateVALIDATIONFAILED SyncDatabaseMigrationReportingState = "VALIDATION_FAILED" +) + +func PossibleValuesForSyncDatabaseMigrationReportingState() []string { + return []string{ + string(SyncDatabaseMigrationReportingStateBACKUPCOMPLETED), + string(SyncDatabaseMigrationReportingStateBACKUPINPROGRESS), + string(SyncDatabaseMigrationReportingStateCANCELLED), + string(SyncDatabaseMigrationReportingStateCANCELLING), + string(SyncDatabaseMigrationReportingStateCOMPLETE), + string(SyncDatabaseMigrationReportingStateCOMPLETING), + string(SyncDatabaseMigrationReportingStateCONFIGURING), + string(SyncDatabaseMigrationReportingStateFAILED), + string(SyncDatabaseMigrationReportingStateINITIALIAZING), + 
string(SyncDatabaseMigrationReportingStateREADYTOCOMPLETE), + string(SyncDatabaseMigrationReportingStateRESTORECOMPLETED), + string(SyncDatabaseMigrationReportingStateRESTOREINPROGRESS), + string(SyncDatabaseMigrationReportingStateRUNNING), + string(SyncDatabaseMigrationReportingStateSTARTING), + string(SyncDatabaseMigrationReportingStateUNDEFINED), + string(SyncDatabaseMigrationReportingStateVALIDATING), + string(SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE), + string(SyncDatabaseMigrationReportingStateVALIDATIONFAILED), + } +} + +func (s *SyncDatabaseMigrationReportingState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSyncDatabaseMigrationReportingState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSyncDatabaseMigrationReportingState(input string) (*SyncDatabaseMigrationReportingState, error) { + vals := map[string]SyncDatabaseMigrationReportingState{ + "backup_completed": SyncDatabaseMigrationReportingStateBACKUPCOMPLETED, + "backup_in_progress": SyncDatabaseMigrationReportingStateBACKUPINPROGRESS, + "cancelled": SyncDatabaseMigrationReportingStateCANCELLED, + "cancelling": SyncDatabaseMigrationReportingStateCANCELLING, + "complete": SyncDatabaseMigrationReportingStateCOMPLETE, + "completing": SyncDatabaseMigrationReportingStateCOMPLETING, + "configuring": SyncDatabaseMigrationReportingStateCONFIGURING, + "failed": SyncDatabaseMigrationReportingStateFAILED, + "initialiazing": SyncDatabaseMigrationReportingStateINITIALIAZING, + "ready_to_complete": SyncDatabaseMigrationReportingStateREADYTOCOMPLETE, + "restore_completed": SyncDatabaseMigrationReportingStateRESTORECOMPLETED, + "restore_in_progress": SyncDatabaseMigrationReportingStateRESTOREINPROGRESS, + "running": SyncDatabaseMigrationReportingStateRUNNING, + "starting": 
SyncDatabaseMigrationReportingStateSTARTING, + "undefined": SyncDatabaseMigrationReportingStateUNDEFINED, + "validating": SyncDatabaseMigrationReportingStateVALIDATING, + "validation_complete": SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE, + "validation_failed": SyncDatabaseMigrationReportingStateVALIDATIONFAILED, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SyncDatabaseMigrationReportingState(input) + return &out, nil +} + +type SyncTableMigrationState string + +const ( + SyncTableMigrationStateBEFORELOAD SyncTableMigrationState = "BEFORE_LOAD" + SyncTableMigrationStateCANCELED SyncTableMigrationState = "CANCELED" + SyncTableMigrationStateCOMPLETED SyncTableMigrationState = "COMPLETED" + SyncTableMigrationStateERROR SyncTableMigrationState = "ERROR" + SyncTableMigrationStateFAILED SyncTableMigrationState = "FAILED" + SyncTableMigrationStateFULLLOAD SyncTableMigrationState = "FULL_LOAD" +) + +func PossibleValuesForSyncTableMigrationState() []string { + return []string{ + string(SyncTableMigrationStateBEFORELOAD), + string(SyncTableMigrationStateCANCELED), + string(SyncTableMigrationStateCOMPLETED), + string(SyncTableMigrationStateERROR), + string(SyncTableMigrationStateFAILED), + string(SyncTableMigrationStateFULLLOAD), + } +} + +func (s *SyncTableMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSyncTableMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSyncTableMigrationState(input string) (*SyncTableMigrationState, error) { + vals := map[string]SyncTableMigrationState{ + "before_load": SyncTableMigrationStateBEFORELOAD, + "canceled": SyncTableMigrationStateCANCELED, + "completed": SyncTableMigrationStateCOMPLETED, 
+ "error": SyncTableMigrationStateERROR, + "failed": SyncTableMigrationStateFAILED, + "full_load": SyncTableMigrationStateFULLLOAD, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SyncTableMigrationState(input) + return &out, nil +} + +type TaskState string + +const ( + TaskStateCanceled TaskState = "Canceled" + TaskStateFailed TaskState = "Failed" + TaskStateFailedInputValidation TaskState = "FailedInputValidation" + TaskStateFaulted TaskState = "Faulted" + TaskStateQueued TaskState = "Queued" + TaskStateRunning TaskState = "Running" + TaskStateSucceeded TaskState = "Succeeded" + TaskStateUnknown TaskState = "Unknown" +) + +func PossibleValuesForTaskState() []string { + return []string{ + string(TaskStateCanceled), + string(TaskStateFailed), + string(TaskStateFailedInputValidation), + string(TaskStateFaulted), + string(TaskStateQueued), + string(TaskStateRunning), + string(TaskStateSucceeded), + string(TaskStateUnknown), + } +} + +func (s *TaskState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTaskState(input string) (*TaskState, error) { + vals := map[string]TaskState{ + "canceled": TaskStateCanceled, + "failed": TaskStateFailed, + "failedinputvalidation": TaskStateFailedInputValidation, + "faulted": TaskStateFaulted, + "queued": TaskStateQueued, + "running": TaskStateRunning, + "succeeded": TaskStateSucceeded, + "unknown": TaskStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskState(input) + return &out, nil +} + +type TaskType string + +const ( + TaskTypeConnectPointMongoDb 
TaskType = "Connect.MongoDb" + TaskTypeConnectToSourcePointMySql TaskType = "ConnectToSource.MySql" + TaskTypeConnectToSourcePointOraclePointSync TaskType = "ConnectToSource.Oracle.Sync" + TaskTypeConnectToSourcePointPostgreSqlPointSync TaskType = "ConnectToSource.PostgreSql.Sync" + TaskTypeConnectToSourcePointSqlServer TaskType = "ConnectToSource.SqlServer" + TaskTypeConnectToSourcePointSqlServerPointSync TaskType = "ConnectToSource.SqlServer.Sync" + TaskTypeConnectToTargetPointAzureDbForMySql TaskType = "ConnectToTarget.AzureDbForMySql" + TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointAzureSqlDbMI TaskType = "ConnectToTarget.AzureSqlDbMI" + TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS TaskType = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointSqlDb TaskType = "ConnectToTarget.SqlDb" + TaskTypeConnectToTargetPointSqlDbPointSync TaskType = "ConnectToTarget.SqlDb.Sync" + TaskTypeGetTDECertificatesPointSql TaskType = "GetTDECertificates.Sql" + TaskTypeGetUserTablesMySql TaskType = "GetUserTablesMySql" + TaskTypeGetUserTablesOracle TaskType = "GetUserTablesOracle" + TaskTypeGetUserTablesPointAzureSqlDbPointSync TaskType = "GetUserTables.AzureSqlDb.Sync" + TaskTypeGetUserTablesPointSql TaskType = "GetUserTables.Sql" + TaskTypeGetUserTablesPostgreSql TaskType = "GetUserTablesPostgreSql" + TaskTypeMigratePointMongoDb TaskType = "Migrate.MongoDb" + TaskTypeMigratePointMySqlPointAzureDbForMySql TaskType = "Migrate.MySql.AzureDbForMySql" + TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync TaskType = "Migrate.MySql.AzureDbForMySql.Sync" + TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync TaskType = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + 
TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo TaskType = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + TaskTypeMigratePointSqlServerPointAzureSqlDbMI TaskType = "Migrate.SqlServer.AzureSqlDbMI" + TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync TaskType = "Migrate.SqlServer.AzureSqlDb.Sync" + TaskTypeMigratePointSqlServerPointSqlDb TaskType = "Migrate.SqlServer.SqlDb" + TaskTypeMigratePointSsis TaskType = "Migrate.Ssis" + TaskTypeMigrateSchemaSqlServerSqlDb TaskType = "MigrateSchemaSqlServerSqlDb" + TaskTypeServicePointCheckPointOCI TaskType = "Service.Check.OCI" + TaskTypeServicePointInstallPointOCI TaskType = "Service.Install.OCI" + TaskTypeServicePointUploadPointOCI TaskType = "Service.Upload.OCI" + TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync TaskType = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + TaskTypeValidatePointMongoDb TaskType = "Validate.MongoDb" + TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync TaskType = "Validate.Oracle.AzureDbPostgreSql.Sync" +) + +func PossibleValuesForTaskType() []string { + return []string{ + string(TaskTypeConnectPointMongoDb), + string(TaskTypeConnectToSourcePointMySql), + string(TaskTypeConnectToSourcePointOraclePointSync), + string(TaskTypeConnectToSourcePointPostgreSqlPointSync), + string(TaskTypeConnectToSourcePointSqlServer), + string(TaskTypeConnectToSourcePointSqlServerPointSync), + string(TaskTypeConnectToTargetPointAzureDbForMySql), + string(TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointAzureSqlDbMI), + 
string(TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointSqlDb), + string(TaskTypeConnectToTargetPointSqlDbPointSync), + string(TaskTypeGetTDECertificatesPointSql), + string(TaskTypeGetUserTablesMySql), + string(TaskTypeGetUserTablesOracle), + string(TaskTypeGetUserTablesPointAzureSqlDbPointSync), + string(TaskTypeGetUserTablesPointSql), + string(TaskTypeGetUserTablesPostgreSql), + string(TaskTypeMigratePointMongoDb), + string(TaskTypeMigratePointMySqlPointAzureDbForMySql), + string(TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync), + string(TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMI), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync), + string(TaskTypeMigratePointSqlServerPointSqlDb), + string(TaskTypeMigratePointSsis), + string(TaskTypeMigrateSchemaSqlServerSqlDb), + string(TaskTypeServicePointCheckPointOCI), + string(TaskTypeServicePointInstallPointOCI), + string(TaskTypeServicePointUploadPointOCI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync), + string(TaskTypeValidatePointMongoDb), + string(TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync), + } +} + +func (s *TaskType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func 
parseTaskType(input string) (*TaskType, error) { + vals := map[string]TaskType{ + "connect.mongodb": TaskTypeConnectPointMongoDb, + "connecttosource.mysql": TaskTypeConnectToSourcePointMySql, + "connecttosource.oracle.sync": TaskTypeConnectToSourcePointOraclePointSync, + "connecttosource.postgresql.sync": TaskTypeConnectToSourcePointPostgreSqlPointSync, + "connecttosource.sqlserver": TaskTypeConnectToSourcePointSqlServer, + "connecttosource.sqlserver.sync": TaskTypeConnectToSourcePointSqlServerPointSync, + "connecttotarget.azuredbformysql": TaskTypeConnectToTargetPointAzureDbForMySql, + "connecttotarget.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync, + "connecttotarget.azuresqldbmi": TaskTypeConnectToTargetPointAzureSqlDbMI, + "connecttotarget.azuresqldbmi.sync.lrs": TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS, + "connecttotarget.oracle.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync, + "connecttotarget.sqldb": TaskTypeConnectToTargetPointSqlDb, + "connecttotarget.sqldb.sync": TaskTypeConnectToTargetPointSqlDbPointSync, + "gettdecertificates.sql": TaskTypeGetTDECertificatesPointSql, + "getusertablesmysql": TaskTypeGetUserTablesMySql, + "getusertablesoracle": TaskTypeGetUserTablesOracle, + "getusertables.azuresqldb.sync": TaskTypeGetUserTablesPointAzureSqlDbPointSync, + "getusertables.sql": TaskTypeGetUserTablesPointSql, + "getusertablespostgresql": TaskTypeGetUserTablesPostgreSql, + "migrate.mongodb": TaskTypeMigratePointMongoDb, + "migrate.mysql.azuredbformysql": TaskTypeMigratePointMySqlPointAzureDbForMySql, + "migrate.mysql.azuredbformysql.sync": TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync, + "migrate.oracle.azuredbforpostgresql.sync": TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync, + "migrate.postgresql.azuredbforpostgresql.syncv2": TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo, + "migrate.sqlserver.azuresqldbmi": 
TaskTypeMigratePointSqlServerPointAzureSqlDbMI, + "migrate.sqlserver.azuresqldbmi.sync.lrs": TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "migrate.sqlserver.azuresqldb.sync": TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync, + "migrate.sqlserver.sqldb": TaskTypeMigratePointSqlServerPointSqlDb, + "migrate.ssis": TaskTypeMigratePointSsis, + "migrateschemasqlserversqldb": TaskTypeMigrateSchemaSqlServerSqlDb, + "service.check.oci": TaskTypeServicePointCheckPointOCI, + "service.install.oci": TaskTypeServicePointInstallPointOCI, + "service.upload.oci": TaskTypeServicePointUploadPointOCI, + "validatemigrationinput.sqlserver.azuresqldbmi": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI, + "validatemigrationinput.sqlserver.azuresqldbmi.sync.lrs": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "validatemigrationinput.sqlserver.sqldb.sync": TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync, + "validate.mongodb": TaskTypeValidatePointMongoDb, + "validate.oracle.azuredbpostgresql.sync": TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskType(input) + return &out, nil +} + +type UpdateActionType string + +const ( + UpdateActionTypeAddedOnTarget UpdateActionType = "AddedOnTarget" + UpdateActionTypeChangedOnTarget UpdateActionType = "ChangedOnTarget" + UpdateActionTypeDeletedOnTarget UpdateActionType = "DeletedOnTarget" +) + +func PossibleValuesForUpdateActionType() []string { + return []string{ + string(UpdateActionTypeAddedOnTarget), + string(UpdateActionTypeChangedOnTarget), + string(UpdateActionTypeDeletedOnTarget), + } +} + +func (s *UpdateActionType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err 
:= parseUpdateActionType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseUpdateActionType(input string) (*UpdateActionType, error) { + vals := map[string]UpdateActionType{ + "addedontarget": UpdateActionTypeAddedOnTarget, + "changedontarget": UpdateActionTypeChangedOnTarget, + "deletedontarget": UpdateActionTypeDeletedOnTarget, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UpdateActionType(input) + return &out, nil +} + +type ValidationStatus string + +const ( + ValidationStatusCompleted ValidationStatus = "Completed" + ValidationStatusCompletedWithIssues ValidationStatus = "CompletedWithIssues" + ValidationStatusDefault ValidationStatus = "Default" + ValidationStatusFailed ValidationStatus = "Failed" + ValidationStatusInProgress ValidationStatus = "InProgress" + ValidationStatusInitialized ValidationStatus = "Initialized" + ValidationStatusNotStarted ValidationStatus = "NotStarted" + ValidationStatusStopped ValidationStatus = "Stopped" +) + +func PossibleValuesForValidationStatus() []string { + return []string{ + string(ValidationStatusCompleted), + string(ValidationStatusCompletedWithIssues), + string(ValidationStatusDefault), + string(ValidationStatusFailed), + string(ValidationStatusInProgress), + string(ValidationStatusInitialized), + string(ValidationStatusNotStarted), + string(ValidationStatusStopped), + } +} + +func (s *ValidationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseValidationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseValidationStatus(input string) (*ValidationStatus, error) { + vals := map[string]ValidationStatus{ + "completed": 
ValidationStatusCompleted, + "completedwithissues": ValidationStatusCompletedWithIssues, + "default": ValidationStatusDefault, + "failed": ValidationStatusFailed, + "inprogress": ValidationStatusInProgress, + "initialized": ValidationStatusInitialized, + "notstarted": ValidationStatusNotStarted, + "stopped": ValidationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ValidationStatus(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/id_file.go b/resource-manager/datamigration/2025-06-30/put/id_file.go new file mode 100644 index 00000000000..0184b8a522b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/id_file.go @@ -0,0 +1,148 @@ +package put + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&FileId{}) +} + +var _ resourceids.ResourceId = &FileId{} + +// FileId is a struct representing the Resource ID for a File +type FileId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + FileName string +} + +// NewFileID returns a new FileId struct +func NewFileID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, fileName string) FileId { + return FileId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + FileName: fileName, + } +} + +// ParseFileID parses 'input' into a FileId +func ParseFileID(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseFileIDInsensitively parses 'input' case-insensitively into a FileId +// note: this method should only be used for API response data and not user input +func ParseFileIDInsensitively(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *FileId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if 
id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.FileName, ok = input.Parsed["fileName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "fileName", input) + } + + return nil +} + +// ValidateFileID checks that 'input' can be parsed as a File ID +func ValidateFileID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseFileID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted File ID +func (id FileId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/files/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.FileName) +} + +// Segments returns a slice of Resource ID Segments which comprise this File ID +func (id FileId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + 
resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticFiles", "files", "files"), + resourceids.UserSpecifiedSegment("fileName", "fileName"), + } +} + +// String returns a human-readable description of this File ID +func (id FileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("File Name: %q", id.FileName), + } + return fmt.Sprintf("File (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/put/id_file_test.go b/resource-manager/datamigration/2025-06-30/put/id_file_test.go new file mode 100644 index 00000000000..e3035736f4b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/id_file_test.go @@ -0,0 +1,372 @@ +package put + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &FileId{} + +func TestNewFileID(t *testing.T) { + id := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.FileName != "fileName" { + t.Fatalf("Expected %q but got %q for Segment 'FileName'", id.FileName, "fileName") + } +} + +func TestFormatFileID(t *testing.T) { + actual := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseFileID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestParseFileIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + FileName: "fIlEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestSegmentsForFileId(t *testing.T) { + segments := FileId{}.Segments() + if len(segments) == 0 { + t.Fatalf("FileId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/put/id_project.go b/resource-manager/datamigration/2025-06-30/put/id_project.go new file mode 100644 index 00000000000..36270603bda --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/id_project.go @@ -0,0 +1,139 @@ +package put + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ProjectId{}) +} + +var _ resourceids.ResourceId = &ProjectId{} + +// ProjectId is a struct representing the Resource ID for a Project +type ProjectId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string +} + +// NewProjectID returns a new ProjectId struct +func NewProjectID(subscriptionId string, resourceGroupName string, serviceName string, projectName string) ProjectId { + return ProjectId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + } +} + +// ParseProjectID parses 'input' into a ProjectId +func ParseProjectID(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseProjectIDInsensitively parses 'input' case-insensitively into a ProjectId +// note: this method should only be used for API response data and not user input +func ParseProjectIDInsensitively(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ProjectId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + 
if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + return nil +} + +// ValidateProjectID checks that 'input' can be parsed as a Project ID +func ValidateProjectID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseProjectID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Project ID +func (id ProjectId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Project ID +func (id ProjectId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + } +} + +// String 
returns a human-readable description of this Project ID +func (id ProjectId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + } + return fmt.Sprintf("Project (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/put/id_project_test.go b/resource-manager/datamigration/2025-06-30/put/id_project_test.go new file mode 100644 index 00000000000..9aba04de4ce --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/id_project_test.go @@ -0,0 +1,327 @@ +package put + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &ProjectId{} + +func TestNewProjectID(t *testing.T) { + id := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } +} + +func TestFormatProjectID(t *testing.T) { + actual := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", 
"projectName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseProjectID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestParseProjectIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + 
Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestSegmentsForProjectId(t *testing.T) { + segments := ProjectId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ProjectId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/put/id_service.go b/resource-manager/datamigration/2025-06-30/put/id_service.go new file mode 100644 index 00000000000..d6c75873c69 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/id_service.go @@ -0,0 +1,130 @@ +package put + +import ( + "fmt" + "strings" + + 
"github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&ServiceId{}) +} + +var _ resourceids.ResourceId = &ServiceId{} + +// ServiceId is a struct representing the Resource ID for a Service +type ServiceId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string +} + +// NewServiceID returns a new ServiceId struct +func NewServiceID(subscriptionId string, resourceGroupName string, serviceName string) ServiceId { + return ServiceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + } +} + +// ParseServiceID parses 'input' into a ServiceId +func ParseServiceID(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceIDInsensitively parses 'input' case-insensitively into a ServiceId +// note: this method should only be used for API response data and not user input +func ParseServiceIDInsensitively(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + return nil +} + +// ValidateServiceID checks that 'input' can be parsed as a Service ID +func ValidateServiceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service ID +func (id ServiceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service ID +func (id ServiceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + } +} + +// String returns a human-readable description of this Service ID +func (id 
ServiceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + } + return fmt.Sprintf("Service (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/put/id_service_test.go b/resource-manager/datamigration/2025-06-30/put/id_service_test.go new file mode 100644 index 00000000000..0f9f94e2065 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/id_service_test.go @@ -0,0 +1,282 @@ +package put + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &ServiceId{} + +func TestNewServiceID(t *testing.T) { + id := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } +} + +func TestFormatServiceID(t *testing.T) { + actual := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func 
TestParseServiceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if 
actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestParseServiceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestSegmentsForServiceId(t *testing.T) { + segments := ServiceId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/put/id_servicetask.go b/resource-manager/datamigration/2025-06-30/put/id_servicetask.go new file mode 100644 index 00000000000..80ba8e2e78f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/id_servicetask.go @@ -0,0 +1,139 @@ +package put + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&ServiceTaskId{}) +} + +var _ resourceids.ResourceId = &ServiceTaskId{} + +// ServiceTaskId is a struct representing the Resource ID for a Service Task +type ServiceTaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ServiceTaskName string +} + +// NewServiceTaskID returns a new ServiceTaskId struct +func NewServiceTaskID(subscriptionId string, resourceGroupName string, serviceName string, serviceTaskName string) ServiceTaskId { + return ServiceTaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ServiceTaskName: serviceTaskName, + } +} + +// ParseServiceTaskID parses 'input' into a ServiceTaskId +func ParseServiceTaskID(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceTaskIDInsensitively parses 'input' case-insensitively into a ServiceTaskId +// note: this method should only be used for API response data and not user input +func ParseServiceTaskIDInsensitively(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceTaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ServiceTaskName, ok = input.Parsed["serviceTaskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceTaskName", input) + } + + return nil +} + +// ValidateServiceTaskID checks that 'input' can be parsed as a Service Task ID +func ValidateServiceTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service Task ID +func (id ServiceTaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/serviceTasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ServiceTaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service Task ID +func (id ServiceTaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + 
resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticServiceTasks", "serviceTasks", "serviceTasks"), + resourceids.UserSpecifiedSegment("serviceTaskName", "serviceTaskName"), + } +} + +// String returns a human-readable description of this Service Task ID +func (id ServiceTaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Service Task Name: %q", id.ServiceTaskName), + } + return fmt.Sprintf("Service Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/put/id_servicetask_test.go b/resource-manager/datamigration/2025-06-30/put/id_servicetask_test.go new file mode 100644 index 00000000000..12d09905ede --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/id_servicetask_test.go @@ -0,0 +1,327 @@ +package put + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &ServiceTaskId{} + +func TestNewServiceTaskID(t *testing.T) { + id := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ServiceTaskName != "serviceTaskName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceTaskName'", id.ServiceTaskName, "serviceTaskName") + } +} + +func TestFormatServiceTaskID(t *testing.T) { + actual := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseServiceTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // 
Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", 
v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestParseServiceTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI 
(mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ServiceTaskName: "sErViCeTaSkNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", 
v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestSegmentsForServiceTaskId(t *testing.T) { + segments := ServiceTaskId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceTaskId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/put/id_task.go b/resource-manager/datamigration/2025-06-30/put/id_task.go new file mode 100644 index 00000000000..eec418bf518 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/id_task.go @@ -0,0 +1,148 @@ +package put + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&TaskId{}) +} + +var _ resourceids.ResourceId = &TaskId{} + +// TaskId is a struct representing the Resource ID for a Task +type TaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + TaskName string +} + +// NewTaskID returns a new TaskId struct +func NewTaskID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, taskName string) TaskId { + return TaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + TaskName: taskName, + } +} + +// ParseTaskID parses 'input' into a TaskId +func ParseTaskID(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseTaskIDInsensitively parses 'input' case-insensitively into a TaskId +// note: this method should only be used for API response data and not user input +func ParseTaskIDInsensitively(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *TaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if 
id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.TaskName, ok = input.Parsed["taskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "taskName", input) + } + + return nil +} + +// ValidateTaskID checks that 'input' can be parsed as a Task ID +func ValidateTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Task ID +func (id TaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/tasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.TaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Task ID +func (id TaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + 
resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticTasks", "tasks", "tasks"), + resourceids.UserSpecifiedSegment("taskName", "taskName"), + } +} + +// String returns a human-readable description of this Task ID +func (id TaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("Task Name: %q", id.TaskName), + } + return fmt.Sprintf("Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/put/id_task_test.go b/resource-manager/datamigration/2025-06-30/put/id_task_test.go new file mode 100644 index 00000000000..7964500a4f4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/id_task_test.go @@ -0,0 +1,372 @@ +package put + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &TaskId{} + +func TestNewTaskID(t *testing.T) { + id := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.TaskName != "taskName" { + t.Fatalf("Expected %q but got %q for Segment 'TaskName'", id.TaskName, "taskName") + } +} + +func TestFormatTaskID(t *testing.T) { + actual := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.TaskName != v.Expected.TaskName { + t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName) + } + + } +} + +func TestParseTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + TaskName: "tAsKnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.TaskName != v.Expected.TaskName {
+			t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName)
+		}
+
+	}
+}
+
+func TestSegmentsForTaskId(t *testing.T) {
+	segments := TaskId{}.Segments()
+	if len(segments) == 0 {
+		t.Fatalf("TaskId has no segments")
+	}
+
+	uniqueNames := make(map[string]struct{}, 0)
+	for _, segment := range segments {
+		uniqueNames[segment.Name] = struct{}{}
+	}
+	if len(uniqueNames) != len(segments) {
+		// %d (not %q): len() yields an int; %q would render it as a quoted rune literal.
+		t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments))
+	}
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/method_filescreateorupdate.go b/resource-manager/datamigration/2025-06-30/put/method_filescreateorupdate.go
new file mode 100644
index 00000000000..4c766c41b56
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/method_filescreateorupdate.go
@@ -0,0 +1,58 @@
+package put
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/hashicorp/go-azure-sdk/sdk/client"
+	"github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type FilesCreateOrUpdateOperationResponse struct {
+	HttpResponse *http.Response
+	OData        *odata.OData
+	Model        *ProjectFile
+}
+
+// FilesCreateOrUpdate ...
+func (c PUTClient) FilesCreateOrUpdate(ctx context.Context, id FileId, input ProjectFile) (result FilesCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectFile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/put/method_projectscreateorupdate.go b/resource-manager/datamigration/2025-06-30/put/method_projectscreateorupdate.go new file mode 100644 index 00000000000..1d765813367 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/method_projectscreateorupdate.go @@ -0,0 +1,58 @@ +package put + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectsCreateOrUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *Project +} + +// ProjectsCreateOrUpdate ... 
+func (c PUTClient) ProjectsCreateOrUpdate(ctx context.Context, id ProjectId, input Project) (result ProjectsCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model Project + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/put/method_servicescreateorupdate.go b/resource-manager/datamigration/2025-06-30/put/method_servicescreateorupdate.go new file mode 100644 index 00000000000..6372254f4e3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/method_servicescreateorupdate.go @@ -0,0 +1,76 @@ +package put + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesCreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DataMigrationService +} + +// ServicesCreateOrUpdate ... 
+func (c PUTClient) ServicesCreateOrUpdate(ctx context.Context, id ServiceId, input DataMigrationService) (result ServicesCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesCreateOrUpdateThenPoll performs ServicesCreateOrUpdate then polls until it's completed +func (c PUTClient) ServicesCreateOrUpdateThenPoll(ctx context.Context, id ServiceId, input DataMigrationService) error { + result, err := c.ServicesCreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing ServicesCreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesCreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/method_servicetaskscreateorupdate.go b/resource-manager/datamigration/2025-06-30/put/method_servicetaskscreateorupdate.go new file mode 100644 index 00000000000..bf9f7bac02c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/method_servicetaskscreateorupdate.go @@ -0,0 +1,58 @@ +package put + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ServiceTasksCreateOrUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// ServiceTasksCreateOrUpdate ... +func (c PUTClient) ServiceTasksCreateOrUpdate(ctx context.Context, id ServiceTaskId, input ProjectTask) (result ServiceTasksCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/put/method_taskscreateorupdate.go b/resource-manager/datamigration/2025-06-30/put/method_taskscreateorupdate.go new file mode 100644 index 00000000000..a7ebdfd36e0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/method_taskscreateorupdate.go @@ -0,0 +1,58 @@ +package put + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksCreateOrUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// TasksCreateOrUpdate ... 
+func (c PUTClient) TasksCreateOrUpdate(ctx context.Context, id TaskId, input ProjectTask) (result TasksCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_azureactivedirectoryapp.go b/resource-manager/datamigration/2025-06-30/put/model_azureactivedirectoryapp.go new file mode 100644 index 00000000000..b8317c3d742 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_azureactivedirectoryapp.go @@ -0,0 +1,11 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AzureActiveDirectoryApp struct { + AppKey *string `json:"appKey,omitempty"` + ApplicationId *string `json:"applicationId,omitempty"` + IgnoreAzurePermissions *bool `json:"ignoreAzurePermissions,omitempty"` + TenantId *string `json:"tenantId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_backupfileinfo.go b/resource-manager/datamigration/2025-06-30/put/model_backupfileinfo.go new file mode 100644 index 00000000000..ad636d453a7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_backupfileinfo.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupFileInfo struct { + FamilySequenceNumber *int64 `json:"familySequenceNumber,omitempty"` + FileLocation *string `json:"fileLocation,omitempty"` + Status *BackupFileStatus `json:"status,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_backupsetinfo.go b/resource-manager/datamigration/2025-06-30/put/model_backupsetinfo.go new file mode 100644 index 00000000000..7e66848b305 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_backupsetinfo.go @@ -0,0 +1,59 @@ +package put + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupSetInfo struct { + BackupFinishedDate *string `json:"backupFinishedDate,omitempty"` + BackupSetId *string `json:"backupSetId,omitempty"` + BackupStartDate *string `json:"backupStartDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FirstLsn *string `json:"firstLsn,omitempty"` + IsBackupRestored *bool `json:"isBackupRestored,omitempty"` + LastLsn *string `json:"lastLsn,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + ListOfBackupFiles *[]BackupFileInfo `json:"listOfBackupFiles,omitempty"` +} + +func (o *BackupSetInfo) GetBackupFinishedDateAsTime() (*time.Time, error) { + if o.BackupFinishedDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishedDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupFinishedDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishedDate = &formatted +} + +func (o *BackupSetInfo) GetBackupStartDateAsTime() (*time.Time, error) { + if o.BackupStartDate == nil { + return nil, 
nil + } + return dates.ParseAsFormat(o.BackupStartDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupStartDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupStartDate = &formatted +} + +func (o *BackupSetInfo) GetLastModifiedTimeAsTime() (*time.Time, error) { + if o.LastModifiedTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastModifiedTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetLastModifiedTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModifiedTime = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_blobshare.go b/resource-manager/datamigration/2025-06-30/put/model_blobshare.go new file mode 100644 index 00000000000..3a2ae56ab06 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_blobshare.go @@ -0,0 +1,8 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BlobShare struct { + SasUri *string `json:"sasUri,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_commandproperties.go b/resource-manager/datamigration/2025-06-30/put/model_commandproperties.go new file mode 100644 index 00000000000..f48d2af31d2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_commandproperties.go @@ -0,0 +1,85 @@ +package put + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CommandProperties interface { + CommandProperties() BaseCommandPropertiesImpl +} + +var _ CommandProperties = BaseCommandPropertiesImpl{} + +type BaseCommandPropertiesImpl struct { + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s BaseCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s +} + +var _ CommandProperties = RawCommandPropertiesImpl{} + +// RawCommandPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawCommandPropertiesImpl struct { + commandProperties BaseCommandPropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s.commandProperties +} + +func UnmarshalCommandPropertiesImplementation(input []byte) (CommandProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling CommandProperties into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["commandType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureDbSqlMi.Complete") { + var out MigrateMISyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMISyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Sync.Complete.Database") { + var out MigrateSyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + var parent BaseCommandPropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseCommandPropertiesImpl: %+v", err) + } + + return RawCommandPropertiesImpl{ + commandProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connectioninfo.go b/resource-manager/datamigration/2025-06-30/put/model_connectioninfo.go new file mode 100644 index 00000000000..aab36e3c9aa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connectioninfo.go @@ -0,0 +1,117 @@ +package put + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectionInfo interface { + ConnectionInfo() BaseConnectionInfoImpl +} + +var _ ConnectionInfo = BaseConnectionInfoImpl{} + +type BaseConnectionInfoImpl struct { + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s BaseConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s +} + +var _ ConnectionInfo = RawConnectionInfoImpl{} + +// RawConnectionInfoImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawConnectionInfoImpl struct { + connectionInfo BaseConnectionInfoImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s.connectionInfo +} + +func UnmarshalConnectionInfoImplementation(input []byte) (ConnectionInfo, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectionInfo into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["type"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MiSqlConnectionInfo") { + var out MiSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MiSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "mongoDbConnectionInfo") { + var out MongoDbConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MySqlConnectionInfo") { + var out MySqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MySqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "OracleConnectionInfo") { + var out OracleConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into OracleConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "PostgreSqlConnectionInfo") { + var out PostgreSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into PostgreSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SqlConnectionInfo") { + var out SqlConnectionInfo + if err := json.Unmarshal(input, &out); 
err != nil { + return nil, fmt.Errorf("unmarshaling into SqlConnectionInfo: %+v", err) + } + return out, nil + } + + var parent BaseConnectionInfoImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectionInfoImpl: %+v", err) + } + + return RawConnectionInfoImpl{ + connectionInfo: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttomongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_connecttomongodbtaskproperties.go new file mode 100644 index 00000000000..6721b68546c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttomongodbtaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToMongoDbTaskProperties{} + +type ConnectToMongoDbTaskProperties struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToMongoDbTaskProperties{} + +func (s ConnectToMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Connect.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToMongoDbTaskProperties{} + +func (s *ConnectToMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToMongoDbTaskProperties': %+v", i, err) + } + output = 
append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcemysqltaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcemysqltaskinput.go new file mode 100644 index 00000000000..afd9d187486 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcemysqltaskinput.go @@ -0,0 +1,11 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceMySqlTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetPlatform *MySqlTargetPlatformType `json:"targetPlatform,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcemysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcemysqltaskproperties.go new file mode 100644 index 00000000000..5cb7ed7db97 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcemysqltaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToSourceMySqlTaskProperties{} + +type ConnectToSourceMySqlTaskProperties struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceMySqlTaskProperties{} + +func (s ConnectToSourceMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.MySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceMySqlTaskProperties{} + +func (s *ConnectToSourceMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError 
`json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcenonsqltaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcenonsqltaskoutput.go new file mode 100644 index 00000000000..ba19d843c0a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcenonsqltaskoutput.go @@ -0,0 +1,12 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceNonSqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerProperties *ServerProperties `json:"serverProperties,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourceoraclesynctaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourceoraclesynctaskinput.go new file mode 100644 index 00000000000..dc9581c8241 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourceoraclesynctaskinput.go @@ -0,0 +1,8 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceOracleSyncTaskInput struct { + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourceoraclesynctaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourceoraclesynctaskoutput.go new file mode 100644 index 00000000000..4f5f37c9322 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourceoraclesynctaskoutput.go @@ -0,0 +1,11 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceOracleSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourceoraclesynctaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourceoraclesynctaskproperties.go new file mode 100644 index 00000000000..54f8f148f95 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourceoraclesynctaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceOracleSyncTaskProperties{} + +type ConnectToSourceOracleSyncTaskProperties struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceOracleSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceOracleSyncTaskProperties{} + +func (s ConnectToSourceOracleSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceOracleSyncTaskProperties + 
wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.Oracle.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceOracleSyncTaskProperties{} + +func (s *ConnectToSourceOracleSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { 
+ return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceOracleSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcepostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcepostgresqlsynctaskinput.go new file mode 100644 index 00000000000..3dacd3d28c3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcepostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourcePostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcepostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcepostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..ee2cb0d06b8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcepostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourcePostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcepostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcepostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..2209b062e36 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcepostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourcePostgreSqlSyncTaskProperties{} + +type ConnectToSourcePostgreSqlSyncTaskProperties struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) 
MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourcePostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.PostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s *ConnectToSourcePostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := 
make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourcePostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlserversynctaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlserversynctaskproperties.go new file mode 100644 index 00000000000..4ab43b88c29 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlserversynctaskproperties.go @@ -0,0 +1,121 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerSyncTaskProperties{} + +type ConnectToSourceSqlServerSyncTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerSyncTaskProperties{} + +func (s ConnectToSourceSqlServerSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper 
ConnectToSourceSqlServerSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.SqlServer.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerSyncTaskProperties{} + +func (s *ConnectToSourceSqlServerSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling 
index %d field 'Commands' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskinput.go new file mode 100644 index 00000000000..ea8a780f9ab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskinput.go @@ -0,0 +1,15 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceSqlServerTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectDatabases *bool `json:"collectDatabases,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + CollectTdeCertificateInfo *bool `json:"collectTdeCertificateInfo,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutput.go new file mode 100644 index 00000000000..84fa27d6dfd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutput.go @@ -0,0 +1,100 @@ +package put + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceSqlServerTaskOutput interface { + ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl +} + +var _ ConnectToSourceSqlServerTaskOutput = BaseConnectToSourceSqlServerTaskOutputImpl{} + +type BaseConnectToSourceSqlServerTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s +} + +var _ ConnectToSourceSqlServerTaskOutput = RawConnectToSourceSqlServerTaskOutputImpl{} + +// RawConnectToSourceSqlServerTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectToSourceSqlServerTaskOutputImpl struct { + connectToSourceSqlServerTaskOutput BaseConnectToSourceSqlServerTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s.connectToSourceSqlServerTaskOutput +} + +func UnmarshalConnectToSourceSqlServerTaskOutputImplementation(input []byte) (ConnectToSourceSqlServerTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TaskLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputTaskLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + return out, nil + } + + var parent BaseConnectToSourceSqlServerTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectToSourceSqlServerTaskOutputImpl: %+v", err) + } + + return RawConnectToSourceSqlServerTaskOutputImpl{ + connectToSourceSqlServerTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutputagentjoblevel.go new file mode 100644 index 00000000000..b31966008fc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +type ConnectToSourceSqlServerTaskOutputAgentJobLevel struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + JobCategory *string `json:"jobCategory,omitempty"` + JobOwner *string `json:"jobOwner,omitempty"` + LastExecutedOn *string `json:"lastExecutedOn,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutputdatabaselevel.go new file mode 100644 index 00000000000..4da9519ffb4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutputdatabaselevel.go @@ -0,0 +1,56 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +type ConnectToSourceSqlServerTaskOutputDatabaseLevel struct { + CompatibilityLevel *DatabaseCompatLevel `json:"compatibilityLevel,omitempty"` + DatabaseFiles *[]DatabaseFileInfo `json:"databaseFiles,omitempty"` + DatabaseState *DatabaseState `json:"databaseState,omitempty"` + Name *string `json:"name,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = 
"DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutputloginlevel.go new file mode 100644 index 00000000000..9ce662dcb2c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutputloginlevel.go @@ -0,0 +1,56 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +type ConnectToSourceSqlServerTaskOutputLoginLevel struct { + DefaultDatabase *string `json:"defaultDatabase,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + LoginType *LoginType `json:"loginType,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputLoginLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling 
ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutputtasklevel.go b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutputtasklevel.go new file mode 100644 index 00000000000..b6f468412e5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskoutputtasklevel.go @@ -0,0 +1,58 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +type ConnectToSourceSqlServerTaskOutputTaskLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + DatabaseTdeCertificateMapping *map[string]string `json:"databaseTdeCertificateMapping,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputTaskLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + decoded["resultType"] = "TaskLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskproperties.go 
b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskproperties.go new file mode 100644 index 00000000000..0b1fd45e71a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttosourcesqlservertaskproperties.go @@ -0,0 +1,124 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerTaskProperties{} + +type ConnectToSourceSqlServerTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskProperties{} + +func (s ConnectToSourceSqlServerTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + decoded["taskType"] = 
"ConnectToSource.SqlServer" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerTaskProperties{} + +func (s *ConnectToSourceSqlServerTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := 
make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbformysqltaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbformysqltaskinput.go new file mode 100644 index 00000000000..94ea3850a7e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbformysqltaskinput.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForMySqlTaskInput struct { + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbformysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbformysqltaskoutput.go new file mode 100644 index 00000000000..e40010292cc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbformysqltaskoutput.go @@ -0,0 +1,12 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForMySqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbformysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbformysqltaskproperties.go new file mode 100644 index 00000000000..adf1b1bd0af --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbformysqltaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetAzureDbForMySqlTaskProperties{} + +type ConnectToTargetAzureDbForMySqlTaskProperties struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) 
MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s *ConnectToTargetAzureDbForMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := 
make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..7033195274b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,9 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..0fd0163d703 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..30ef57372d0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..37d8537b15c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ +package put + +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput struct { + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..7add2a10162 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput struct { + DatabaseSchemaMap *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined `json:"databaseSchemaMap,omitempty"` + Databases *[]string `json:"databases,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go new file mode 100644 index 00000000000..cfe60d58b90 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go @@ -0,0 +1,9 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined struct { + Database *string `json:"database,omitempty"` + Schemas *[]string `json:"schemas,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..966bbe2a16c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + 
+func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, 
ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqldbtaskinput.go new file mode 100644 index 00000000000..53b7d4b97ab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqldbtaskinput.go @@ -0,0 +1,9 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlDbTaskInput struct { + QueryObjectCounts *bool `json:"queryObjectCounts,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqldbtaskoutput.go new file mode 100644 index 00000000000..381442ea0f3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqldbtaskoutput.go @@ -0,0 +1,11 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlDbTaskOutput struct { + Databases *map[string]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqldbtaskproperties.go new file mode 100644 index 00000000000..2a5e758951f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqldbtaskproperties.go @@ -0,0 +1,109 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlDbTaskProperties{} + +type ConnectToTargetSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlDbTaskProperties{} + +func (s ConnectToTargetSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlDbTaskProperties + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlDbTaskProperties{} + +func (s *ConnectToTargetSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + 
return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmisynctaskinput.go new file mode 100644 index 00000000000..5e163ab3917 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmisynctaskinput.go @@ -0,0 +1,9 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmisynctaskoutput.go new file mode 100644 index 00000000000..7ee371e0674 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlMISyncTaskOutput struct { + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmisynctaskproperties.go new file mode 100644 index 00000000000..9135207e1b4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmisynctaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlMISyncTaskProperties{} + +type ConnectToTargetSqlMISyncTaskProperties struct { + Input *ConnectToTargetSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlMISyncTaskProperties{} + +func (s ConnectToTargetSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + 
if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlMISyncTaskProperties{} + +func (s *ConnectToTargetSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' 
for 'ConnectToTargetSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmitaskinput.go new file mode 100644 index 00000000000..c15684e6d1b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmitaskinput.go @@ -0,0 +1,11 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlMITaskInput struct { + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmitaskoutput.go new file mode 100644 index 00000000000..9019e80e9f9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmitaskoutput.go @@ -0,0 +1,13 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlMITaskOutput struct { + AgentJobs *[]string `json:"agentJobs,omitempty"` + Id *string `json:"id,omitempty"` + Logins *[]string `json:"logins,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmitaskproperties.go new file mode 100644 index 00000000000..eba9359b981 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlmitaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlMITaskProperties{} + +type ConnectToTargetSqlMITaskProperties struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlMITaskProperties{} + +func (s ConnectToTargetSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper 
ConnectToTargetSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlMITaskProperties{} + +func (s *ConnectToTargetSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + 
return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlsqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlsqldbsynctaskinput.go new file mode 100644 index 00000000000..ca69a55e5a2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlsqldbsynctaskinput.go @@ -0,0 +1,9 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlSqlDbSyncTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlsqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlsqldbsynctaskproperties.go new file mode 100644 index 00000000000..246823762d9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_connecttotargetsqlsqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +type ConnectToTargetSqlSqlDbSyncTaskProperties struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s *ConnectToTargetSqlSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput 
`json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_databasebackupinfo.go b/resource-manager/datamigration/2025-06-30/put/model_databasebackupinfo.go new file mode 100644 index 00000000000..f5697e18c83 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_databasebackupinfo.go @@ -0,0 +1,33 @@ +package put + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseBackupInfo struct { + BackupFiles *[]string `json:"backupFiles,omitempty"` + BackupFinishDate *string `json:"backupFinishDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FamilyCount *int64 `json:"familyCount,omitempty"` + IsCompressed *bool `json:"isCompressed,omitempty"` + IsDamaged *bool `json:"isDamaged,omitempty"` + Position *int64 `json:"position,omitempty"` +} + +func (o *DatabaseBackupInfo) GetBackupFinishDateAsTime() (*time.Time, error) { + if o.BackupFinishDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseBackupInfo) SetBackupFinishDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishDate = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_databasefileinfo.go b/resource-manager/datamigration/2025-06-30/put/model_databasefileinfo.go new file mode 100644 index 00000000000..b687e3a128a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_databasefileinfo.go @@ -0,0 +1,14 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseFileInfo struct { + DatabaseName *string `json:"databaseName,omitempty"` + FileType *DatabaseFileType `json:"fileType,omitempty"` + Id *string `json:"id,omitempty"` + LogicalName *string `json:"logicalName,omitempty"` + PhysicalFullName *string `json:"physicalFullName,omitempty"` + RestoreFullName *string `json:"restoreFullName,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_databaseinfo.go b/resource-manager/datamigration/2025-06-30/put/model_databaseinfo.go new file mode 100644 index 00000000000..175d46242d1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_databaseinfo.go @@ -0,0 +1,8 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseInfo struct { + SourceDatabaseName string `json:"sourceDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_databasesummaryresult.go b/resource-manager/datamigration/2025-06-30/put/model_databasesummaryresult.go new file mode 100644 index 00000000000..6bddb9cd545 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_databasesummaryresult.go @@ -0,0 +1,47 @@ +package put + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_databasetable.go b/resource-manager/datamigration/2025-06-30/put/model_databasetable.go new file mode 100644 index 00000000000..b2adfefbe17 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_databasetable.go @@ -0,0 +1,9 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseTable struct { + HasRows *bool `json:"hasRows,omitempty"` + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_dataintegrityvalidationresult.go b/resource-manager/datamigration/2025-06-30/put/model_dataintegrityvalidationresult.go new file mode 100644 index 00000000000..2d04cc1c86a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_dataintegrityvalidationresult.go @@ -0,0 +1,9 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataIntegrityValidationResult struct { + FailedObjects *map[string]string `json:"failedObjects,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_dataitemmigrationsummaryresult.go b/resource-manager/datamigration/2025-06-30/put/model_dataitemmigrationsummaryresult.go new file mode 100644 index 00000000000..605754e1c06 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_dataitemmigrationsummaryresult.go @@ -0,0 +1,46 @@ +package put + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataItemMigrationSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DataItemMigrationSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DataItemMigrationSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_datamigrationservice.go b/resource-manager/datamigration/2025-06-30/put/model_datamigrationservice.go new file mode 100644 index 00000000000..02fc17c318a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_datamigrationservice.go @@ -0,0 +1,21 @@ +package put + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataMigrationService struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Kind *string `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DataMigrationServiceProperties `json:"properties,omitempty"` + Sku *ServiceSku `json:"sku,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_datamigrationserviceproperties.go b/resource-manager/datamigration/2025-06-30/put/model_datamigrationserviceproperties.go new file mode 100644 index 00000000000..171cf2db194 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_datamigrationserviceproperties.go @@ -0,0 +1,13 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataMigrationServiceProperties struct { + AutoStopDelay *string `json:"autoStopDelay,omitempty"` + DeleteResourcesOnStop *bool `json:"deleteResourcesOnStop,omitempty"` + ProvisioningState *ServiceProvisioningState `json:"provisioningState,omitempty"` + PublicKey *string `json:"publicKey,omitempty"` + VirtualNicId *string `json:"virtualNicId,omitempty"` + VirtualSubnetId *string `json:"virtualSubnetId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_executionstatistics.go b/resource-manager/datamigration/2025-06-30/put/model_executionstatistics.go new file mode 100644 index 00000000000..27890588ef5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_executionstatistics.go @@ -0,0 +1,13 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// ExecutionStatistics describes query-execution metrics (CPU/elapsed time,
// execution count, errors and per-wait-type statistics).
type ExecutionStatistics struct {
	CpuTimeMs      *float64                   `json:"cpuTimeMs,omitempty"`
	ElapsedTimeMs  *float64                   `json:"elapsedTimeMs,omitempty"`
	ExecutionCount *int64                     `json:"executionCount,omitempty"`
	HasErrors      *bool                      `json:"hasErrors,omitempty"`
	SqlErrors      *[]string                  `json:"sqlErrors,omitempty"`
	WaitStats      *map[string]WaitStatistics `json:"waitStats,omitempty"`
}

// ---- new file: resource-manager/datamigration/2025-06-30/put/model_fileshare.go ----

package put

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// FileShare identifies an SMB file share plus optional credentials.
// Path is the only required field.
type FileShare struct {
	Password *string `json:"password,omitempty"`
	Path     string  `json:"path"`
	UserName *string `json:"userName,omitempty"`
}

// ---- new file: resource-manager/datamigration/2025-06-30/put/model_gettdecertificatessqltaskinput.go ----

package put

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// GetTdeCertificatesSqlTaskInput is the input for the "GetTDECertificates.Sql"
// task: the source SQL connection, the certificates to fetch, and the backup
// file share used for the export. All three fields are required.
type GetTdeCertificatesSqlTaskInput struct {
	BackupFileShare      FileShare                  `json:"backupFileShare"`
	ConnectionInfo       SqlConnectionInfo          `json:"connectionInfo"`
	SelectedCertificates []SelectedCertificateInput `json:"selectedCertificates"`
}

// ---- new file: resource-manager/datamigration/2025-06-30/put/model_gettdecertificatessqltaskoutput.go ----

package put

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// GetTdeCertificatesSqlTaskOutput is the result of the "GetTDECertificates.Sql"
// task: certificates keyed by name (base64-encoded parts) plus any validation errors.
type GetTdeCertificatesSqlTaskOutput struct {
	Base64EncodedCertificates *map[string][]string   `json:"base64EncodedCertificates,omitempty"`
	ValidationErrors          *[]ReportableException `json:"validationErrors,omitempty"`
}

// ---- new file: resource-manager/datamigration/2025-06-30/put/model_gettdecertificatessqltaskproperties.go ----

package put

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// Compile-time assertion that this variant satisfies the ProjectTaskProperties
// discriminated-union interface.
var _ ProjectTaskProperties = GetTdeCertificatesSqlTaskProperties{}

// GetTdeCertificatesSqlTaskProperties is the "GetTDECertificates.Sql" variant of
// ProjectTaskProperties, pairing the task's Input with its (possibly multiple)
// Output entries alongside the fields shared by every task variant.
type GetTdeCertificatesSqlTaskProperties struct {
	Input  *GetTdeCertificatesSqlTaskInput    `json:"input,omitempty"`
	Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base fields as a
// BaseProjectTaskPropertiesImpl, satisfying the union interface.
func (s GetTdeCertificatesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = GetTdeCertificatesSqlTaskProperties{}

// MarshalJSON encodes the struct normally (via a local wrapper type to avoid
// recursing into this method), then forces the "taskType" discriminator to the
// fixed value "GetTDECertificates.Sql" before re-encoding.
func (s GetTdeCertificatesSqlTaskProperties) MarshalJSON() ([]byte, error) {
	type wrapper GetTdeCertificatesSqlTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling GetTdeCertificatesSqlTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties: %+v", err)
	}

	// Pin the discriminator for this variant.
	decoded["taskType"] = "GetTDECertificates.Sql"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling GetTdeCertificatesSqlTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &GetTdeCertificatesSqlTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then performs a second pass
// over the raw JSON to decode the polymorphic "commands" list element-by-element
// through UnmarshalCommandPropertiesImplementation (CommandProperties is itself
// a discriminated union and cannot be decoded with a static type).
func (s *GetTdeCertificatesSqlTaskProperties) UnmarshalJSON(bytes []byte) error {
	// "commands" is intentionally absent here; it is handled below.
	var decoded struct {
		Input      *GetTdeCertificatesSqlTaskInput    `json:"input,omitempty"`
		Output     *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                 `json:"clientData,omitempty"`
		Errors     *[]ODataError                      `json:"errors,omitempty"`
		State      *TaskState                         `json:"state,omitempty"`
		TaskType   TaskType                           `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetTdeCertificatesSqlTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}

// ---- new file: resource-manager/datamigration/2025-06-30/put/model_getusertablesmysqltaskinput.go ----

package put

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +type GetUserTablesMySqlTaskInput struct { + ConnectionInfo MySqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_getusertablesmysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_getusertablesmysqltaskoutput.go new file mode 100644 index 00000000000..1f6e679d275 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_getusertablesmysqltaskoutput.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesMySqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_getusertablesmysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_getusertablesmysqltaskproperties.go new file mode 100644 index 00000000000..2a24a88828d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_getusertablesmysqltaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesMySqlTaskProperties{} + +type GetUserTablesMySqlTaskProperties struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesMySqlTaskProperties{} + +func (s GetUserTablesMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesMySqlTaskProperties{} + +func (s *GetUserTablesMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State 
*TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_getusertablesoracletaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_getusertablesoracletaskinput.go new file mode 100644 index 00000000000..6a966f2481f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_getusertablesoracletaskinput.go @@ -0,0 +1,9 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesOracleTaskInput struct { + ConnectionInfo OracleConnectionInfo `json:"connectionInfo"` + SelectedSchemas []string `json:"selectedSchemas"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_getusertablesoracletaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_getusertablesoracletaskoutput.go new file mode 100644 index 00000000000..6f8265a8f78 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_getusertablesoracletaskoutput.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesOracleTaskOutput struct { + SchemaName *string `json:"schemaName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_getusertablesoracletaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_getusertablesoracletaskproperties.go new file mode 100644 index 00000000000..e298ebc16c0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_getusertablesoracletaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesOracleTaskProperties{} + +type GetUserTablesOracleTaskProperties struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesOracleTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesOracleTaskProperties{} + +func (s GetUserTablesOracleTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesOracleTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesOracle" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesOracleTaskProperties{} + +func (s *GetUserTablesOracleTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError 
`json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesOracleTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_getusertablespostgresqltaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_getusertablespostgresqltaskinput.go new file mode 100644 index 00000000000..afe92d1a5d1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_getusertablespostgresqltaskinput.go @@ -0,0 +1,9 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesPostgreSqlTaskInput struct { + ConnectionInfo PostgreSqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_getusertablespostgresqltaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_getusertablespostgresqltaskoutput.go new file mode 100644 index 00000000000..ad4a57cd2a9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_getusertablespostgresqltaskoutput.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesPostgreSqlTaskOutput struct { + DatabaseName *string `json:"databaseName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_getusertablespostgresqltaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_getusertablespostgresqltaskproperties.go new file mode 100644 index 00000000000..d2e42d79306 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_getusertablespostgresqltaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesPostgreSqlTaskProperties{} + +type GetUserTablesPostgreSqlTaskProperties struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesPostgreSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesPostgreSqlTaskProperties{} + +func (s GetUserTablesPostgreSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesPostgreSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesPostgreSql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesPostgreSqlTaskProperties{} + +func (s *GetUserTablesPostgreSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string 
`json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesPostgreSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_getusertablessqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_getusertablessqlsynctaskinput.go new file mode 100644 index 00000000000..54843559d77 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_getusertablessqlsynctaskinput.go @@ -0,0 +1,11 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlSyncTaskInput struct { + SelectedSourceDatabases []string `json:"selectedSourceDatabases"` + SelectedTargetDatabases []string `json:"selectedTargetDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_getusertablessqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_getusertablessqlsynctaskoutput.go new file mode 100644 index 00000000000..3e7b8b43ffc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_getusertablessqlsynctaskoutput.go @@ -0,0 +1,11 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlSyncTaskOutput struct { + DatabasesToSourceTables *map[string][]DatabaseTable `json:"databasesToSourceTables,omitempty"` + DatabasesToTargetTables *map[string][]DatabaseTable `json:"databasesToTargetTables,omitempty"` + TableValidationErrors *map[string][]string `json:"tableValidationErrors,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_getusertablessqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_getusertablessqlsynctaskproperties.go new file mode 100644 index 00000000000..0ab67e91d79 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_getusertablessqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlSyncTaskProperties{} + +type GetUserTablesSqlSyncTaskProperties struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlSyncTaskProperties{} + +func (s GetUserTablesSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlSyncTaskProperties{} + +func (s *GetUserTablesSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_getusertablessqltaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_getusertablessqltaskinput.go new file mode 100644 index 00000000000..be4251d3580 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_getusertablessqltaskinput.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// GetUserTablesSqlTaskInput is the input for the "GetUserTables.Sql" task:
// a SQL connection plus the databases whose user tables should be listed.
// EncryptedKeyForSecureFields is optional — presumably used to protect secure
// inputs in transit; confirm against the service swagger.
type GetUserTablesSqlTaskInput struct {
	ConnectionInfo              SqlConnectionInfo `json:"connectionInfo"`
	EncryptedKeyForSecureFields *string           `json:"encryptedKeyForSecureFields,omitempty"`
	SelectedDatabases           []string          `json:"selectedDatabases"`
}

// ---- new file: resource-manager/datamigration/2025-06-30/put/model_getusertablessqltaskoutput.go ----

package put

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// GetUserTablesSqlTaskOutput is the result of the "GetUserTables.Sql" task:
// tables grouped by database name, plus any validation errors.
type GetUserTablesSqlTaskOutput struct {
	DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"`
	Id                *string                     `json:"id,omitempty"`
	ValidationErrors  *[]ReportableException      `json:"validationErrors,omitempty"`
}

// ---- new file: resource-manager/datamigration/2025-06-30/put/model_getusertablessqltaskproperties.go ----

package put

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// Compile-time assertion that this variant satisfies the ProjectTaskProperties
// discriminated-union interface.
var _ ProjectTaskProperties = GetUserTablesSqlTaskProperties{}

// GetUserTablesSqlTaskProperties is the "GetUserTables.Sql" variant of
// ProjectTaskProperties. Unlike most sibling variants it also carries a TaskId.
type GetUserTablesSqlTaskProperties struct {
	Input  *GetUserTablesSqlTaskInput    `json:"input,omitempty"`
	Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"`
	TaskId *string                       `json:"taskId,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base fields as a
// BaseProjectTaskPropertiesImpl, satisfying the union interface.
func (s GetUserTablesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = GetUserTablesSqlTaskProperties{}

// MarshalJSON encodes the struct via a local wrapper type (avoiding recursion
// into this method), then forces the "taskType" discriminator to the fixed
// value "GetUserTables.Sql" before re-encoding.
func (s GetUserTablesSqlTaskProperties) MarshalJSON() ([]byte, error) {
	type wrapper GetUserTablesSqlTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling GetUserTablesSqlTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties: %+v", err)
	}

	// Pin the discriminator for this variant.
	decoded["taskType"] = "GetUserTables.Sql"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling GetUserTablesSqlTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &GetUserTablesSqlTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then performs a second pass
// over the raw JSON to decode the polymorphic "commands" list element-by-element
// through UnmarshalCommandPropertiesImplementation.
func (s *GetUserTablesSqlTaskProperties) UnmarshalJSON(bytes []byte) error {
	// "commands" is intentionally absent here; it is handled below.
	var decoded struct {
		Input      *GetUserTablesSqlTaskInput    `json:"input,omitempty"`
		Output     *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"`
		TaskId     *string                       `json:"taskId,omitempty"`
		ClientData *map[string]string            `json:"clientData,omitempty"`
		Errors     *[]ODataError                 `json:"errors,omitempty"`
		State      *TaskState                    `json:"state,omitempty"`
		TaskType   TaskType                      `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.TaskId = decoded.TaskId
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}

// ---- new file: resource-manager/datamigration/2025-06-30/put/model_migratemisynccompletecommandinput.go ----

package put

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +type MigrateMISyncCompleteCommandInput struct { + SourceDatabaseName string `json:"sourceDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemisynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/put/model_migratemisynccompletecommandoutput.go new file mode 100644 index 00000000000..eaf2ffe9864 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemisynccompletecommandoutput.go @@ -0,0 +1,8 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMISyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemisynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/put/model_migratemisynccompletecommandproperties.go new file mode 100644 index 00000000000..714e9355c86 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemisynccompletecommandproperties.go @@ -0,0 +1,55 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ CommandProperties = MigrateMISyncCompleteCommandProperties{} + +type MigrateMISyncCompleteCommandProperties struct { + Input *MigrateMISyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateMISyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateMISyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateMISyncCompleteCommandProperties{} + +func (s MigrateMISyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMISyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.SqlServer.AzureDbSqlMi.Complete" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_migratemongodbtaskproperties.go new file mode 100644 index 00000000000..4c6b64a179a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemongodbtaskproperties.go @@ -0,0 +1,121 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMongoDbTaskProperties{} + +type MigrateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMongoDbTaskProperties{} + +func (s MigrateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMongoDbTaskProperties{} + +func (s *MigrateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + 
TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MongoDbProgress, 0) + for i, val := range listTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinedatabaseinput.go b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinedatabaseinput.go new file mode 100644 index 00000000000..a51acf5c5ab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinedatabaseinput.go 
@@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlOfflineDatabaseInput struct { + Name *string `json:"name,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskinput.go new file mode 100644 index 00000000000..9c7da2df19c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskinput.go @@ -0,0 +1,32 @@ +package put + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + MakeSourceServerReadOnly *bool `json:"makeSourceServerReadOnly,omitempty"` + OptionalAgentSettings *map[string]string `json:"optionalAgentSettings,omitempty"` + SelectedDatabases []MigrateMySqlAzureDbForMySqlOfflineDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutput.go new file mode 100644 index 00000000000..5097fbe3a45 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutput.go @@ -0,0 +1,100 @@ +package put + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskOutput interface { + MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlOfflineTaskOutput BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlOfflineTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlOfflineTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + 
if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + migrateMySqlAzureDbForMySqlOfflineTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go new file mode 100644 index 00000000000..b41d1bb6228 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + var decoded 
map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go new file mode 100644 index 00000000000..796dd1a16dd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go @@ -0,0 +1,52 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go new file mode 100644 index 00000000000..b125ad3165b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package put + +import ( + 
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MarshalJSON() ([]byte, 
error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go new file mode 100644 index 00000000000..7037019fe7a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go @@ -0,0 +1,61 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling 
MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskproperties.go new file mode 100644 index 00000000000..2c412b9c100 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlofflinetaskproperties.go @@ -0,0 +1,127 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlOfflineTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskProperties + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlOfflineTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range 
listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlOfflineTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlsyncdatabaseinput.go new file mode 100644 index 00000000000..122c7fd65c9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlsyncdatabaseinput.go @@ -0,0 +1,13 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlSyncDatabaseInput struct { + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlsynctaskinput.go new file mode 100644 index 00000000000..abd00a56294 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlsynctaskinput.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlSyncTaskInput struct { + SelectedDatabases []MigrateMySqlAzureDbForMySqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlsynctaskoutput.go new file mode 100644 index 00000000000..554cc5ec412 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratemysqlazuredbformysqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package put + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// MigrateMySqlAzureDbForMySqlSyncTaskOutput is the discriminated-union
// interface for the outputs of the MySQL -> Azure DB for MySQL sync migration
// task. The concrete implementation is selected from the "resultType" JSON
// discriminator (see UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation).
type MigrateMySqlAzureDbForMySqlSyncTaskOutput interface {
	MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl
}

var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{}

// BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl holds the fields shared by
// every concrete output type: the resource id and the "resultType"
// discriminator value.
type BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct {
	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateMySqlAzureDbForMySqlSyncTaskOutput returns the shared base fields,
// satisfying the interface of the same name.
func (s BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl {
	return s
}

var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{}
// RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct {
	migrateMySqlAzureDbForMySqlSyncTaskOutput BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl
	Type                                      string                 // the unrecognised "resultType" discriminator value
	Values                                    map[string]interface{} // the raw decoded payload
}

// MigrateMySqlAzureDbForMySqlSyncTaskOutput returns the base fields that could
// still be parsed from the unrecognised payload.
func (s RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl {
	return s.migrateMySqlAzureDbForMySqlSyncTaskOutput
}

// UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation decodes
// input into the concrete MigrateMySqlAzureDbForMySqlSyncTaskOutput
// implementation selected by the case-insensitive "resultType" discriminator.
// Unrecognised discriminator values fall back to the Raw wrapper; nil input
// yields (nil, nil).
func UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlSyncTaskOutput, error) {
	if input == nil {
		return nil, nil
	}

	// Decode generically first so the discriminator can be inspected.
	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutput into map[string]interface: %+v", err)
	}

	var value string
	if v, ok := temp["resultType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "DatabaseLevelErrorOutput") {
		var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "DatabaseLevelOutput") {
		var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ErrorOutput") {
		var out MigrateMySqlAzureDbForMySqlSyncTaskOutputError
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "MigrationLevelOutput") {
		var out MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "TableLevelOutput") {
		var out MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err)
		}
		return out, nil
	}

	// Unknown discriminator: preserve the base fields plus the raw payload.
	var parent BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl: %+v", err)
	}

	return RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{
		migrateMySqlAzureDbForMySqlSyncTaskOutput: parent,
		Type:   value,
		Values: temp,
	}, nil

}
var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{}

// MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError is the
// "DatabaseLevelErrorOutput" variant of the task output: a database-level
// error message plus the associated migration error events.
type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError struct {
	ErrorMessage *string                            `json:"errorMessage,omitempty"`
	Events       *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"`

	// Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateMySqlAzureDbForMySqlSyncTaskOutput returns the shared base fields.
func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl {
	return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{}

// MarshalJSON implements json.Marshaler, forcing the "resultType"
// discriminator to "DatabaseLevelErrorOutput". The local wrapper type strips
// this method so the inner json.Marshal does not recurse.
func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) {
	type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err)
	}

	decoded["resultType"] = "DatabaseLevelErrorOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err)
	}

	return encoded, nil
}
var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{}

// MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel is the
// "DatabaseLevelOutput" variant of the task output: per-database progress
// counters for the full-load and CDC phases of the sync migration.
type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel struct {
	AppliedChanges          *int64                               `json:"appliedChanges,omitempty"`
	CdcDeleteCounter        *int64                               `json:"cdcDeleteCounter,omitempty"`
	CdcInsertCounter        *int64                               `json:"cdcInsertCounter,omitempty"`
	CdcUpdateCounter        *int64                               `json:"cdcUpdateCounter,omitempty"`
	DatabaseName            *string                              `json:"databaseName,omitempty"`
	EndedOn                 *string                              `json:"endedOn,omitempty"` // NOTE(review): timestamps are plain strings here — presumably RFC3339, confirm against the API
	FullLoadCompletedTables *int64                               `json:"fullLoadCompletedTables,omitempty"`
	FullLoadErroredTables   *int64                               `json:"fullLoadErroredTables,omitempty"`
	FullLoadLoadingTables   *int64                               `json:"fullLoadLoadingTables,omitempty"`
	FullLoadQueuedTables    *int64                               `json:"fullLoadQueuedTables,omitempty"`
	IncomingChanges         *int64                               `json:"incomingChanges,omitempty"`
	InitializationCompleted *bool                                `json:"initializationCompleted,omitempty"`
	Latency                 *int64                               `json:"latency,omitempty"`
	MigrationState          *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"`
	StartedOn               *string                              `json:"startedOn,omitempty"`

	// Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateMySqlAzureDbForMySqlSyncTaskOutput returns the shared base fields.
func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl {
	return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{}

// MarshalJSON implements json.Marshaler, forcing the "resultType"
// discriminator to "DatabaseLevelOutput". The local wrapper type strips this
// method so the inner json.Marshal does not recurse.
func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err)
	}

	decoded["resultType"] = "DatabaseLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err)
	}

	return encoded, nil
}
var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{}

// MigrateMySqlAzureDbForMySqlSyncTaskOutputError is the "ErrorOutput" variant
// of the task output: a single reportable exception for the task.
type MigrateMySqlAzureDbForMySqlSyncTaskOutputError struct {
	Error *ReportableException `json:"error,omitempty"`

	// Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateMySqlAzureDbForMySqlSyncTaskOutput returns the shared base fields.
func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl {
	return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{}

// MarshalJSON implements json.Marshaler, forcing the "resultType"
// discriminator to "ErrorOutput". The local wrapper type strips this method so
// the inner json.Marshal does not recurse.
func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MarshalJSON() ([]byte, error) {
	type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputError
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err)
	}

	decoded["resultType"] = "ErrorOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err)
	}

	return encoded, nil
}
var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{}

// MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel is the
// "MigrationLevelOutput" variant of the task output: migration-wide metadata
// about the source/target servers and the overall time window.
type MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel struct {
	EndedOn             *string `json:"endedOn,omitempty"`
	SourceServer        *string `json:"sourceServer,omitempty"`
	SourceServerVersion *string `json:"sourceServerVersion,omitempty"`
	StartedOn           *string `json:"startedOn,omitempty"`
	TargetServer        *string `json:"targetServer,omitempty"`
	TargetServerVersion *string `json:"targetServerVersion,omitempty"`

	// Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateMySqlAzureDbForMySqlSyncTaskOutput returns the shared base fields.
func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl {
	return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{}

// MarshalJSON implements json.Marshaler, forcing the "resultType"
// discriminator to "MigrationLevelOutput". The local wrapper type strips this
// method so the inner json.Marshal does not recurse.
func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err)
	}

	decoded["resultType"] = "MigrationLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err)
	}

	return encoded, nil
}
var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{}

// MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel is the
// "TableLevelOutput" variant of the task output: per-table full-load and CDC
// progress.
// NOTE(review): the CDC counters are *string here while the database-level
// variant uses *int64 — this mirrors the service's wire contract; confirm
// against the REST API spec before "fixing".
type MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel struct {
	CdcDeleteCounter      *string                  `json:"cdcDeleteCounter,omitempty"`
	CdcInsertCounter      *string                  `json:"cdcInsertCounter,omitempty"`
	CdcUpdateCounter      *string                  `json:"cdcUpdateCounter,omitempty"`
	DataErrorsCounter     *int64                   `json:"dataErrorsCounter,omitempty"`
	DatabaseName          *string                  `json:"databaseName,omitempty"`
	FullLoadEndedOn       *string                  `json:"fullLoadEndedOn,omitempty"`
	FullLoadEstFinishTime *string                  `json:"fullLoadEstFinishTime,omitempty"`
	FullLoadStartedOn     *string                  `json:"fullLoadStartedOn,omitempty"`
	FullLoadTotalRows     *int64                   `json:"fullLoadTotalRows,omitempty"`
	LastModifiedTime      *string                  `json:"lastModifiedTime,omitempty"`
	State                 *SyncTableMigrationState `json:"state,omitempty"`
	TableName             *string                  `json:"tableName,omitempty"`
	TotalChangesApplied   *int64                   `json:"totalChangesApplied,omitempty"`

	// Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateMySqlAzureDbForMySqlSyncTaskOutput returns the shared base fields.
func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl {
	return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{}

// MarshalJSON implements json.Marshaler, forcing the "resultType"
// discriminator to "TableLevelOutput". The local wrapper type strips this
// method so the inner json.Marshal does not recurse.
func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err)
	}

	decoded["resultType"] = "TableLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err)
	}

	return encoded, nil
}
var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlSyncTaskProperties{}

// MigrateMySqlAzureDbForMySqlSyncTaskProperties is the ProjectTaskProperties
// variant for the "Migrate.MySql.AzureDbForMySql.Sync" task type, pairing the
// task input with its (polymorphic) output list.
type MigrateMySqlAzureDbForMySqlSyncTaskProperties struct {
	Input  *MigrateMySqlAzureDbForMySqlSyncTaskInput    `json:"input,omitempty"`
	Output *[]MigrateMySqlAzureDbForMySqlSyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base fields common to every task
// properties variant.
func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskProperties{}

// MarshalJSON implements json.Marshaler, forcing the "taskType" discriminator
// to "Migrate.MySql.AzureDbForMySql.Sync". The local wrapper type strips this
// method so the inner json.Marshal does not recurse.
func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) MarshalJSON() ([]byte, error) {
	type wrapper MigrateMySqlAzureDbForMySqlSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err)
	}

	decoded["taskType"] = "Migrate.MySql.AzureDbForMySql.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlSyncTaskProperties{}

// UnmarshalJSON implements json.Unmarshaler. Plain fields are decoded
// directly; "commands" and "output" hold discriminated-union elements, so each
// element is re-dispatched through the matching Unmarshal*Implementation
// helper.
func (s *MigrateMySqlAzureDbForMySqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *MigrateMySqlAzureDbForMySqlSyncTaskInput `json:"input,omitempty"`
		ClientData *map[string]string                        `json:"clientData,omitempty"`
		Errors     *[]ODataError                             `json:"errors,omitempty"`
		State      *TaskState                                `json:"state,omitempty"`
		TaskType   TaskType                                  `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Re-decode to raw messages so polymorphic list elements can be dispatched
	// individually.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]MigrateMySqlAzureDbForMySqlSyncTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}
var _ ProjectTaskProperties = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{}

// MigrateOracleAzureDbForPostgreSqlSyncTaskProperties is the
// ProjectTaskProperties variant for the
// "Migrate.Oracle.AzureDbForPostgreSql.Sync" task type, pairing the task input
// with its (polymorphic) output list.
type MigrateOracleAzureDbForPostgreSqlSyncTaskProperties struct {
	Input  *MigrateOracleAzureDbPostgreSqlSyncTaskInput    `json:"input,omitempty"`
	Output *[]MigrateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base fields common to every task
// properties variant.
func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{}

// MarshalJSON implements json.Marshaler, forcing the "taskType" discriminator
// to "Migrate.Oracle.AzureDbForPostgreSql.Sync". The local wrapper type strips
// this method so the inner json.Marshal does not recurse.
func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) {
	type wrapper MigrateOracleAzureDbForPostgreSqlSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	decoded["taskType"] = "Migrate.Oracle.AzureDbForPostgreSql.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{}

// UnmarshalJSON implements json.Unmarshaler. Plain fields are decoded
// directly; "commands" and "output" hold discriminated-union elements, so each
// element is re-dispatched through the matching Unmarshal*Implementation
// helper.
func (s *MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"`
		ClientData *map[string]string                           `json:"clientData,omitempty"`
		Errors     *[]ODataError                                `json:"errors,omitempty"`
		State      *TaskState                                   `json:"state,omitempty"`
		TaskType   TaskType                                     `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Re-decode to raw messages so polymorphic list elements can be dispatched
	// individually.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]MigrateOracleAzureDbPostgreSqlSyncTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}
// MigrateOracleAzureDbPostgreSqlSyncDatabaseInput describes one database
// selected for the Oracle -> Azure DB for PostgreSQL sync migration task,
// including per-database setting maps, schema selection and an optional table
// mapping.
type MigrateOracleAzureDbPostgreSqlSyncDatabaseInput struct {
	CaseManipulation   *string            `json:"caseManipulation,omitempty"`
	MigrationSetting   *map[string]string `json:"migrationSetting,omitempty"`
	Name               *string            `json:"name,omitempty"`
	SchemaName         *string            `json:"schemaName,omitempty"`
	SourceSetting      *map[string]string `json:"sourceSetting,omitempty"`
	TableMap           *map[string]string `json:"tableMap,omitempty"` // presumably source-table -> target-table mapping — confirm against the service API
	TargetDatabaseName *string            `json:"targetDatabaseName,omitempty"`
	TargetSetting      *map[string]string `json:"targetSetting,omitempty"`
}

// MigrateOracleAzureDbPostgreSqlSyncTaskInput is the input payload for the
// Oracle -> Azure DB for PostgreSQL sync migration task: the selected
// databases plus source and target connection details. All three fields are
// required (no omitempty on the JSON tags).
type MigrateOracleAzureDbPostgreSqlSyncTaskInput struct {
	SelectedDatabases    []MigrateOracleAzureDbPostgreSqlSyncDatabaseInput `json:"selectedDatabases"`
	SourceConnectionInfo OracleConnectionInfo                              `json:"sourceConnectionInfo"`
	TargetConnectionInfo PostgreSqlConnectionInfo                          `json:"targetConnectionInfo"`
}
// MigrateOracleAzureDbPostgreSqlSyncTaskOutput is the discriminated-union
// interface for the outputs of the Oracle -> Azure DB for PostgreSQL sync
// migration task. The concrete implementation is selected from the
// "resultType" JSON discriminator (see
// UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation).
type MigrateOracleAzureDbPostgreSqlSyncTaskOutput interface {
	MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl
}

var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{}

// BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl holds the fields shared
// by every concrete output type: the resource id and the "resultType"
// discriminator value.
type BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct {
	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateOracleAzureDbPostgreSqlSyncTaskOutput returns the shared base fields,
// satisfying the interface of the same name.
func (s BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl {
	return s
}

var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{}

// RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct {
	migrateOracleAzureDbPostgreSqlSyncTaskOutput BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl
	Type                                         string                 // the unrecognised "resultType" discriminator value
	Values                                       map[string]interface{} // the raw decoded payload
}

// MigrateOracleAzureDbPostgreSqlSyncTaskOutput returns the base fields that
// could still be parsed from the unrecognised payload.
func (s RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl {
	return s.migrateOracleAzureDbPostgreSqlSyncTaskOutput
}

// UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation decodes
// input into the concrete MigrateOracleAzureDbPostgreSqlSyncTaskOutput
// implementation selected by the case-insensitive "resultType" discriminator.
// Unrecognised discriminator values fall back to the Raw wrapper; nil input
// yields (nil, nil).
func UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(input []byte) (MigrateOracleAzureDbPostgreSqlSyncTaskOutput, error) {
	if input == nil {
		return nil, nil
	}

	// Decode generically first so the discriminator can be inspected.
	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutput into map[string]interface: %+v", err)
	}

	var value string
	if v, ok := temp["resultType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "DatabaseLevelErrorOutput") {
		var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "DatabaseLevelOutput") {
		var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ErrorOutput") {
		var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputError
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "MigrationLevelOutput") {
		var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "TableLevelOutput") {
		var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err)
		}
		return out, nil
	}

	// Unknown discriminator: preserve the base fields plus the raw payload.
	var parent BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl: %+v", err)
	}

	return RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{
		migrateOracleAzureDbPostgreSqlSyncTaskOutput: parent,
		Type:   value,
		Values: temp,
	}, nil

}
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/put/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..98179cdfcb4 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/put/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/put/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..890b21fad7f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/put/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..61e83ff0408 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ +package put + +import ( + 
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/put/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..9a507e0a2f1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) 
MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..c121ac4804a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go @@ -0,0 +1,14 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]interface{} `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SelectedTables *[]MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput `json:"selectedTables,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go new file mode 100644 index 00000000000..14eeba00eba --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go @@ -0,0 +1,8 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput struct { + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..99939987b33 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,30 @@ +package put + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..e5a4906d2fb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package put + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput interface { + MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +type BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +// RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s.migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput +} + +func UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(input []byte) (MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if 
strings.EqualFold(value, "MigrationLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..7231be8418c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..b3863da8eb0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() 
BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..ef9d3374e34 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go @@ -0,0 +1,53 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..dae8b7149d1 --- 
/dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel struct { + DatabaseCount *float64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerType *ScenarioSource `json:"sourceServerType,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *ReplicateMigrationState `json:"state,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerType *ScenarioTarget `json:"targetServerType,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..ccc6b86a2e3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..1dafc27078a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,130 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
var _ ProjectTaskProperties = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{}

// MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties is the
// ProjectTaskProperties variant whose "taskType" discriminator is
// "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2".
type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties struct {
	CreatedOn   *string                                                `json:"createdOn,omitempty"`
	Input       *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput    `json:"input,omitempty"`
	IsCloneable *bool                                                  `json:"isCloneable,omitempty"`
	Output      *[]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"`
	TaskId      *string                                                `json:"taskId,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base portion of this value,
// satisfying the parent interface.
func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{}

// MarshalJSON serializes the struct and then forces the "taskType"
// discriminator to "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" so the
// payload identifies the concrete type on the wire.
func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The wrapper type drops this MarshalJSON method, avoiding infinite recursion.
	type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	decoded["taskType"] = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then re-reads the raw
// message to hand the polymorphic "commands" and "output" lists to their
// per-element discriminator-aware unmarshal helpers.
func (s *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	// Anonymous struct deliberately omits Commands/Output: those are
	// interface-typed and need the helpers below.
	var decoded struct {
		CreatedOn   *string                                             `json:"createdOn,omitempty"`
		Input       *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"`
		IsCloneable *bool                                               `json:"isCloneable,omitempty"`
		TaskId      *string                                             `json:"taskId,omitempty"`
		ClientData  *map[string]string                                  `json:"clientData,omitempty"`
		Errors      *[]ODataError                                       `json:"errors,omitempty"`
		State       *TaskState                                          `json:"state,omitempty"`
		TaskType    TaskType                                            `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.CreatedOn = decoded.CreatedOn
	s.Input = decoded.Input
	s.IsCloneable = decoded.IsCloneable
	s.TaskId = decoded.TaskId
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}

// MigrateSqlServerSqlDbDatabaseInput describes one database selected for a
// SQL Server -> Azure SQL DB migration task; field semantics follow the
// service's REST contract.
type MigrateSqlServerSqlDbDatabaseInput struct {
	Id                   *string            `json:"id,omitempty"`
	MakeSourceDbReadOnly *bool              `json:"makeSourceDbReadOnly,omitempty"`
	Name                 *string            `json:"name,omitempty"`
	SchemaSetting        *interface{}       `json:"schemaSetting,omitempty"`
	TableMap             *map[string]string `json:"tableMap,omitempty"`
	TargetDatabaseName   *string            `json:"targetDatabaseName,omitempty"`
}
+ +type MigrateSqlServerSqlDbSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskinput.go new file mode 100644 index 00000000000..5caa8fb3df5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskinput.go @@ -0,0 +1,11 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutput.go new file mode 100644 index 00000000000..b42ca0b20c5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutput.go @@ -0,0 +1,108 @@ +package put + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncTaskOutput interface { + MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = RawMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + migrateSqlServerSqlDbSyncTaskOutput BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s.migrateSqlServerSqlDbSyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbSyncTaskOutputImpl{ + migrateSqlServerSqlDbSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..26601fc63de --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..9fd2d950f1f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package put + +import ( + 
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err 
!= nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputerror.go new file mode 100644 index 00000000000..ee3c5e67bfa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputError{} + +type MigrateSqlServerSqlDbSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..07b6add4760 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go @@ -0,0 +1,58 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputtablelevel.go 
b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..df323915e34 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) 
MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..d5a162ec370 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbsynctaskproperties.go @@ -0,0 +1,121 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
var _ ProjectTaskProperties = MigrateSqlServerSqlDbSyncTaskProperties{}

// MigrateSqlServerSqlDbSyncTaskProperties is the ProjectTaskProperties
// variant whose "taskType" discriminator is "Migrate.SqlServer.AzureSqlDb.Sync".
type MigrateSqlServerSqlDbSyncTaskProperties struct {
	Input  *MigrateSqlServerSqlDbSyncTaskInput    `json:"input,omitempty"`
	Output *[]MigrateSqlServerSqlDbSyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base portion of this value,
// satisfying the parent interface.
func (s MigrateSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskProperties{}

// MarshalJSON serializes the struct and then forces the "taskType"
// discriminator to "Migrate.SqlServer.AzureSqlDb.Sync" so the payload
// identifies the concrete type on the wire.
func (s MigrateSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The wrapper type drops this MarshalJSON method, avoiding infinite recursion.
	type wrapper MigrateSqlServerSqlDbSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err)
	}

	decoded["taskType"] = "Migrate.SqlServer.AzureSqlDb.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MigrateSqlServerSqlDbSyncTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then re-reads the raw
// message to hand the polymorphic "commands" and "output" lists to their
// per-element discriminator-aware unmarshal helpers.
func (s *MigrateSqlServerSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	// Anonymous struct deliberately omits Commands/Output: those are
	// interface-typed and need the helpers below.
	var decoded struct {
		Input      *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"`
		ClientData *map[string]string                  `json:"clientData,omitempty"`
		Errors     *[]ODataError                       `json:"errors,omitempty"`
		State      *TaskState                          `json:"state,omitempty"`
		TaskType   TaskType                            `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]MigrateSqlServerSqlDbSyncTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}
--- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskinput.go @@ -0,0 +1,13 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigrateSqlServerSqlDbDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutput.go new file mode 100644 index 00000000000..3eb22f8b724 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutput.go @@ -0,0 +1,116 @@ +package put + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbTaskOutput interface { + MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbTaskOutput = BaseMigrateSqlServerSqlDbTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbTaskOutput = RawMigrateSqlServerSqlDbTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlDbTaskOutputImpl struct { + migrateSqlServerSqlDbTaskOutput BaseMigrateSqlServerSqlDbTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s.migrateSqlServerSqlDbTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "MigrationDatabaseLevelValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbTaskOutputImpl{ + migrateSqlServerSqlDbTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputdatabaselevel.go new file mode 100644 index 00000000000..f015b1a7ae4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputdatabaselevel.go @@ -0,0 +1,65 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevel) 
MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go new file mode 100644 index 00000000000..b3ca5704a01 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go @@ -0,0 +1,60 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult struct { + DataIntegrityValidationResult *DataIntegrityValidationResult `json:"dataIntegrityValidationResult,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + QueryAnalysisValidationResult *QueryAnalysisValidationResult `json:"queryAnalysisValidationResult,omitempty"` + SchemaValidationResult *SchemaComparisonValidationResult `json:"schemaValidationResult,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationDatabaseLevelValidationOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputerror.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputerror.go new file mode 100644 index 00000000000..8ddc3e3906f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputerror.go @@ -0,0 +1,52 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputError{} + +type MigrateSqlServerSqlDbTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputError) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputError{} + +func (s MigrateSqlServerSqlDbTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + 
return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputmigrationlevel.go new file mode 100644 index 00000000000..5b85d41360d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + MigrationValidationResult *MigrationValidationResult `json:"migrationValidationResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string 
`json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputtablelevel.go new file mode 100644 index 00000000000..3762a38e686 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputtablelevel.go @@ -0,0 +1,60 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputvalidationresult.go 
b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputvalidationresult.go new file mode 100644 index 00000000000..8ab4b337948 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskoutputvalidationresult.go @@ -0,0 +1,54 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputValidationResult struct { + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationValidationOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, 
fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskproperties.go new file mode 100644 index 00000000000..41c7f251c38 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqldbtaskproperties.go @@ -0,0 +1,130 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlDbTaskProperties{} + +type MigrateSqlServerSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlDbTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskProperties{} + +func (s MigrateSqlServerSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling 
MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbTaskProperties{} + +func (s *MigrateSqlServerSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return 
fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmidatabaseinput.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmidatabaseinput.go new file mode 100644 index 00000000000..097b85f92df --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmidatabaseinput.go @@ -0,0 +1,12 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMIDatabaseInput struct { + BackupFilePaths *[]string `json:"backupFilePaths,omitempty"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + Id *string `json:"id,omitempty"` + Name string `json:"name"` + RestoreDatabaseName string `json:"restoreDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..65846adda58 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskinput.go @@ -0,0 +1,14 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + NumberOfParallelDatabaseMigrations *float64 `json:"numberOfParallelDatabaseMigrations,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..ac3bbf0d893 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskoutput.go @@ -0,0 +1,92 @@ +package put + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskOutput interface { + MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = BaseMigrateSqlServerSqlMISyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlMISyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = RawMigrateSqlServerSqlMISyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlMISyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlMISyncTaskOutputImpl struct { + migrateSqlServerSqlMISyncTaskOutput BaseMigrateSqlServerSqlMISyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s.migrateSqlServerSqlMISyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlMISyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMISyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMISyncTaskOutputImpl: %+v", err) + } + + return 
RawMigrateSqlServerSqlMISyncTaskOutputImpl{ + migrateSqlServerSqlMISyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..ed8ca6e9a8f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go @@ -0,0 +1,62 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel struct { + ActiveBackupSets *[]BackupSetInfo `json:"activeBackupSets,omitempty"` + ContainerName *string `json:"containerName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FullBackupSetInfo *BackupSetInfo `json:"fullBackupSetInfo,omitempty"` + IsFullBackupRestored *bool `json:"isFullBackupRestored,omitempty"` + LastRestoredBackupSetInfo *BackupSetInfo `json:"lastRestoredBackupSetInfo,omitempty"` + MigrationState *DatabaseMigrationState `json:"migrationState,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return 
BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskoutputerror.go new file mode 100644 index 00000000000..021d569d9a5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskoutputerror.go @@ -0,0 +1,52 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputError{} + +type MigrateSqlServerSqlMISyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputError{} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..efa133835f1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go @@ -0,0 +1,62 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + DatabaseErrorCount *int64 `json:"databaseErrorCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerName *string `json:"sourceServerName,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerName *string `json:"targetServerName,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..970982a3fed --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmisynctaskproperties.go @@ -0,0 +1,124 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlMISyncTaskProperties{} + +type MigrateSqlServerSqlMISyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskProperties{} + +func (s MigrateSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMISyncTaskProperties{} + +func (s *MigrateSqlServerSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 
'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMISyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskinput.go new file mode 100644 index 00000000000..48ad7f841f0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskinput.go @@ -0,0 +1,18 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMITaskInput struct { + AadDomainName *string `json:"aadDomainName,omitempty"` + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedAgentJobs *[]string `json:"selectedAgentJobs,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..f010c3d3af9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutput.go @@ -0,0 +1,108 @@ +package put + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMITaskOutput interface { + MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl +} + +var _ MigrateSqlServerSqlMITaskOutput = BaseMigrateSqlServerSqlMITaskOutputImpl{} + +type BaseMigrateSqlServerSqlMITaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMITaskOutput = RawMigrateSqlServerSqlMITaskOutputImpl{} + +// RawMigrateSqlServerSqlMITaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlMITaskOutputImpl struct { + migrateSqlServerSqlMITaskOutput BaseMigrateSqlServerSqlMITaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s.migrateSqlServerSqlMITaskOutput +} + +func UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(input []byte) (MigrateSqlServerSqlMITaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMITaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMITaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMITaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlMITaskOutputImpl{ + migrateSqlServerSqlMITaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputagentjoblevel.go new file mode 100644 index 00000000000..cb8e244ba69 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +type MigrateSqlServerSqlMITaskOutputAgentJobLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + Message *string `json:"message,omitempty"` + Name *string `json:"name,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputdatabaselevel.go new file mode 100644 index 00000000000..57663a77a99 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputdatabaselevel.go @@ -0,0 +1,59 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMITaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling 
MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputerror.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputerror.go new file mode 100644 index 00000000000..3ab5071c47d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputerror.go @@ -0,0 +1,52 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputError{} + +type MigrateSqlServerSqlMITaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputError) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputError{} + +func (s MigrateSqlServerSqlMITaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputError: %+v", 
err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputloginlevel.go new file mode 100644 index 00000000000..77327143a9e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputloginlevel.go @@ -0,0 +1,58 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +type MigrateSqlServerSqlMITaskOutputLoginLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LoginName *string `json:"loginName,omitempty"` + Message *string `json:"message,omitempty"` + Stage *LoginMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputLoginLevel + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputmigrationlevel.go new file mode 100644 index 00000000000..a4db234f021 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMITaskOutputMigrationLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + Message *string `json:"message,omitempty"` + OrphanedUsersInfo *[]OrphanedUserInfo `json:"orphanedUsersInfo,omitempty"` + ServerRoleResults *map[string]StartMigrationScenarioServerRoleResult `json:"serverRoleResults,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); 
err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..859b22a6ff7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesqlserversqlmitaskproperties.go @@ -0,0 +1,133 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlMITaskProperties{} + +type MigrateSqlServerSqlMITaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlMITaskOutput `json:"output,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler 
= MigrateSqlServerSqlMITaskProperties{} + +func (s MigrateSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMITaskProperties{} + +func (s *MigrateSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.ParentTaskId = decoded.ParentTaskId + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties into map[string]json.RawMessage: 
%+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMITaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratessistaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_migratessistaskinput.go new file mode 100644 index 00000000000..56acd813b59 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratessistaskinput.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSsisTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + SsisMigrationInfo SsisMigrationInfo `json:"ssisMigrationInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratessistaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_migratessistaskoutput.go new file mode 100644 index 00000000000..7607c074ba4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratessistaskoutput.go @@ -0,0 +1,84 @@ +package put + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSsisTaskOutput interface { + MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl +} + +var _ MigrateSsisTaskOutput = BaseMigrateSsisTaskOutputImpl{} + +type BaseMigrateSsisTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return s +} + +var _ MigrateSsisTaskOutput = RawMigrateSsisTaskOutputImpl{} + +// RawMigrateSsisTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSsisTaskOutputImpl struct {
+	migrateSsisTaskOutput BaseMigrateSsisTaskOutputImpl
+	Type                  string
+	Values                map[string]interface{}
+}
+
+func (s RawMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl {
+	return s.migrateSsisTaskOutput
+}
+
+// UnmarshalMigrateSsisTaskOutputImplementation decodes input into the concrete
+// MigrateSsisTaskOutput variant named by the "resultType" discriminator.
+// Unknown discriminators are preserved in a RawMigrateSsisTaskOutputImpl;
+// nil input yields (nil, nil).
+func UnmarshalMigrateSsisTaskOutputImplementation(input []byte) (MigrateSsisTaskOutput, error) {
+	if input == nil {
+		return nil, nil
+	}
+
+	// Peek at the discriminator without binding to a concrete type yet.
+	var temp map[string]interface{}
+	if err := json.Unmarshal(input, &temp); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutput into map[string]interface: %+v", err)
+	}
+
+	var value string
+	if v, ok := temp["resultType"]; ok {
+		value = fmt.Sprintf("%v", v)
+	}
+
+	if strings.EqualFold(value, "MigrationLevelOutput") {
+		var out MigrateSsisTaskOutputMigrationLevel
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputMigrationLevel: %+v", err)
+		}
+		return out, nil
+	}
+
+	if strings.EqualFold(value, "SsisProjectLevelOutput") {
+		var out MigrateSsisTaskOutputProjectLevel
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputProjectLevel: %+v", err)
+		}
+		return out, nil
+	}
+
+	// Discriminator unrecognised: keep the base fields plus the raw payload.
+	var parent BaseMigrateSsisTaskOutputImpl
+	if err := json.Unmarshal(input, &parent); err != nil {
+		return nil, fmt.Errorf("unmarshaling into BaseMigrateSsisTaskOutputImpl: %+v", err)
+	}
+
+	return RawMigrateSsisTaskOutputImpl{
+		migrateSsisTaskOutput: parent,
+		Type:                  value,
+		Values:                temp,
+	}, nil
+
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratessistaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratessistaskoutputmigrationlevel.go
new file mode 100644
index 00000000000..ea74f2a723f
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_migratessistaskoutputmigrationlevel.go
@@ -0,0 +1,61 @@
+package put
+
+import (
+	"encoding/json"
+	"fmt"
+) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputMigrationLevel{} + +type MigrateSsisTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputMigrationLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputMigrationLevel{} + +func (s MigrateSsisTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + 
return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratessistaskoutputprojectlevel.go b/resource-manager/datamigration/2025-06-30/put/model_migratessistaskoutputprojectlevel.go new file mode 100644 index 00000000000..1888569e064 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratessistaskoutputprojectlevel.go @@ -0,0 +1,59 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputProjectLevel{} + +type MigrateSsisTaskOutputProjectLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FolderName *string `json:"folderName,omitempty"` + Message *string `json:"message,omitempty"` + ProjectName *string `json:"projectName,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputProjectLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputProjectLevel{} + +func (s MigrateSsisTaskOutputProjectLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputProjectLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling 
MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + decoded["resultType"] = "SsisProjectLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratessistaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_migratessistaskproperties.go new file mode 100644 index 00000000000..0faf4d4bb20 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratessistaskproperties.go @@ -0,0 +1,121 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSsisTaskProperties{} + +type MigrateSsisTaskProperties struct { + Input *MigrateSsisTaskInput `json:"input,omitempty"` + Output *[]MigrateSsisTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSsisTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSsisTaskProperties{} + +func (s MigrateSsisTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Ssis" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSsisTaskProperties{} + +func (s *MigrateSsisTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateSsisTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSsisTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := 
make([]MigrateSsisTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSsisTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesynccompletecommandinput.go b/resource-manager/datamigration/2025-06-30/put/model_migratesynccompletecommandinput.go new file mode 100644 index 00000000000..0e4d5c22a84 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesynccompletecommandinput.go @@ -0,0 +1,27 @@ +package put + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSyncCompleteCommandInput struct { + CommitTimeStamp *string `json:"commitTimeStamp,omitempty"` + DatabaseName string `json:"databaseName"` +} + +func (o *MigrateSyncCompleteCommandInput) GetCommitTimeStampAsTime() (*time.Time, error) { + if o.CommitTimeStamp == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CommitTimeStamp, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateSyncCompleteCommandInput) SetCommitTimeStampAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CommitTimeStamp = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/put/model_migratesynccompletecommandoutput.go new file mode 100644 index 00000000000..50804a72895 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_migratesynccompletecommandoutput.go @@ -0,0 +1,9 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrateSyncCompleteCommandOutput reports the result of a
+// "Migrate.Sync.Complete.Database" command.
+type MigrateSyncCompleteCommandOutput struct {
+	Errors *[]ReportableException `json:"errors,omitempty"`
+	Id     *string                `json:"id,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_migratesynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/put/model_migratesynccompletecommandproperties.go
new file mode 100644
index 00000000000..87773c8b5f1
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_migratesynccompletecommandproperties.go
@@ -0,0 +1,56 @@
+package put
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ CommandProperties = MigrateSyncCompleteCommandProperties{}
+
+type MigrateSyncCompleteCommandProperties struct {
+	CommandId *string                           `json:"commandId,omitempty"`
+	Input     *MigrateSyncCompleteCommandInput  `json:"input,omitempty"`
+	Output    *MigrateSyncCompleteCommandOutput `json:"output,omitempty"`
+
+	// Fields inherited from CommandProperties
+
+	CommandType CommandType   `json:"commandType"`
+	Errors      *[]ODataError `json:"errors,omitempty"`
+	State       *CommandState `json:"state,omitempty"`
+}
+
+func (s MigrateSyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl {
+	return BaseCommandPropertiesImpl{
+		CommandType: s.CommandType,
+		Errors:      s.Errors,
+		State:       s.State,
+	}
+}
+
+var _ json.Marshaler = MigrateSyncCompleteCommandProperties{}
+
+func (s MigrateSyncCompleteCommandProperties) MarshalJSON() ([]byte, error) {
+	type wrapper MigrateSyncCompleteCommandProperties
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateSyncCompleteCommandProperties: %+v", err)
+	}
+
+	// Round-trip through a map so the fixed commandType discriminator can be injected.
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateSyncCompleteCommandProperties: %+v", err)
+	}
+
+	decoded["commandType"] = "Migrate.Sync.Complete.Database"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateSyncCompleteCommandProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_migrationeligibilityinfo.go b/resource-manager/datamigration/2025-06-30/put/model_migrationeligibilityinfo.go
new file mode 100644
index 00000000000..c5f9486d716
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_migrationeligibilityinfo.go
@@ -0,0 +1,9 @@
+package put
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrationEligibilityInfo captures whether migration is possible plus any
+// validation messages explaining the verdict.
+type MigrationEligibilityInfo struct {
+	IsEligibleForMigration *bool     `json:"isEligibleForMigration,omitempty"`
+	ValidationMessages     *[]string `json:"validationMessages,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_migrationreportresult.go b/resource-manager/datamigration/2025-06-30/put/model_migrationreportresult.go
new file mode 100644
index 00000000000..1c56c7cd64b
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_migrationreportresult.go
@@ -0,0 +1,9 @@
+package put
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrationReportResult holds a report's identifier and its URL.
+type MigrationReportResult struct {
+	Id        *string `json:"id,omitempty"`
+	ReportURL *string `json:"reportUrl,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_migrationvalidationdatabasesummaryresult.go b/resource-manager/datamigration/2025-06-30/put/model_migrationvalidationdatabasesummaryresult.go
new file mode 100644
index 00000000000..c0ee9201725
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_migrationvalidationdatabasesummaryresult.go
@@ -0,0 +1,44 @@
+package put
+
+import (
+	"time"
+
+	"github.com/hashicorp/go-azure-helpers/lang/dates"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrationValidationDatabaseSummaryResult summarises validation for a single
+// database; EndedOn/StartedOn are RFC 3339 timestamp strings (see the *AsTime
+// helpers below).
+type MigrationValidationDatabaseSummaryResult struct {
+	EndedOn            *string           `json:"endedOn,omitempty"`
+	Id                 *string           `json:"id,omitempty"`
+	MigrationId        *string           `json:"migrationId,omitempty"`
+	SourceDatabaseName *string           `json:"sourceDatabaseName,omitempty"`
+	StartedOn          *string           `json:"startedOn,omitempty"`
+	Status             *ValidationStatus `json:"status,omitempty"`
+	TargetDatabaseName *string           `json:"targetDatabaseName,omitempty"`
+}
+
+// GetEndedOnAsTime parses EndedOn using the RFC 3339 layout; returns (nil, nil) when unset.
+func (o *MigrationValidationDatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) {
+	if o.EndedOn == nil {
+		return nil, nil
+	}
+	return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00")
+}
+
+func (o *MigrationValidationDatabaseSummaryResult) SetEndedOnAsTime(input time.Time) {
+	formatted := input.Format("2006-01-02T15:04:05Z07:00")
+	o.EndedOn = &formatted
+}
+
+// GetStartedOnAsTime parses StartedOn using the RFC 3339 layout; returns (nil, nil) when unset.
+func (o *MigrationValidationDatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) {
+	if o.StartedOn == nil {
+		return nil, nil
+	}
+	return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00")
+}
+
+func (o *MigrationValidationDatabaseSummaryResult) SetStartedOnAsTime(input time.Time) {
+	formatted := input.Format("2006-01-02T15:04:05Z07:00")
+	o.StartedOn = &formatted
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_migrationvalidationoptions.go b/resource-manager/datamigration/2025-06-30/put/model_migrationvalidationoptions.go
new file mode 100644
index 00000000000..5f6118cae5d
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_migrationvalidationoptions.go
@@ -0,0 +1,10 @@
+package put
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrationValidationOptions toggles the individual validation passes.
+type MigrationValidationOptions struct {
+	EnableDataIntegrityValidation *bool `json:"enableDataIntegrityValidation,omitempty"`
+	EnableQueryAnalysisValidation *bool `json:"enableQueryAnalysisValidation,omitempty"`
+	EnableSchemaValidation        *bool `json:"enableSchemaValidation,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_migrationvalidationresult.go b/resource-manager/datamigration/2025-06-30/put/model_migrationvalidationresult.go
new file mode 100644
index 00000000000..25e395c207f
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_migrationvalidationresult.go
@@ -0,0 +1,11 @@
+package put
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +type MigrationValidationResult struct { + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_misqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/put/model_misqlconnectioninfo.go new file mode 100644 index 00000000000..66426a78d94 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_misqlconnectioninfo.go @@ -0,0 +1,54 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MiSqlConnectionInfo{} + +type MiSqlConnectionInfo struct { + ManagedInstanceResourceId string `json:"managedInstanceResourceId"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MiSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MiSqlConnectionInfo{} + +func (s MiSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MiSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MiSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MiSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MiSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MiSqlConnectionInfo: %+v", err) + } + + 
return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbclusterinfo.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbclusterinfo.go new file mode 100644 index 00000000000..29653602397 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbclusterinfo.go @@ -0,0 +1,11 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbClusterInfo struct { + Databases []MongoDbDatabaseInfo `json:"databases"` + SupportsSharding bool `json:"supportsSharding"` + Type MongoDbClusterType `json:"type"` + Version string `json:"version"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbcollectioninfo.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbcollectioninfo.go new file mode 100644 index 00000000000..f1d3413680b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbcollectioninfo.go @@ -0,0 +1,19 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+
+// MongoDbCollectionInfo describes a single MongoDB collection: size and
+// document counts, capped/system/view flags, and optional shard-key metadata.
+type MongoDbCollectionInfo struct {
+	AverageDocumentSize int64                `json:"averageDocumentSize"`
+	DataSize            int64                `json:"dataSize"`
+	DatabaseName        string               `json:"databaseName"`
+	DocumentCount       int64                `json:"documentCount"`
+	IsCapped            bool                 `json:"isCapped"`
+	IsSystemCollection  bool                 `json:"isSystemCollection"`
+	IsView              bool                 `json:"isView"`
+	Name                string               `json:"name"`
+	QualifiedName       string               `json:"qualifiedName"`
+	ShardKey            *MongoDbShardKeyInfo `json:"shardKey,omitempty"`
+	SupportsSharding    bool                 `json:"supportsSharding"`
+	ViewOf              *string              `json:"viewOf,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbcollectionprogress.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbcollectionprogress.go
new file mode 100644
index 00000000000..af876c82058
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbcollectionprogress.go
@@ -0,0 +1,102 @@
+package put
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/go-azure-helpers/lang/dates"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +var _ MongoDbProgress = MongoDbCollectionProgress{} + +type MongoDbCollectionProgress struct { + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbCollectionProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbCollectionProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbCollectionProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) 
SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbCollectionProgress{} + +func (s MongoDbCollectionProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbCollectionProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbCollectionProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbCollectionProgress: %+v", err) + } + + decoded["resultType"] = "Collection" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbCollectionProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbcollectionsettings.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbcollectionsettings.go new file mode 100644 index 00000000000..be49b06c309 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbcollectionsettings.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionSettings struct { + CanDelete *bool `json:"canDelete,omitempty"` + ShardKey *MongoDbShardKeySetting `json:"shardKey,omitempty"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbconnectioninfo.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbconnectioninfo.go new file mode 100644 index 00000000000..8f0fa9efafe --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbconnectioninfo.go @@ -0,0 +1,64 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MongoDbConnectionInfo{} + +type MongoDbConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + ConnectionString string `json:"connectionString"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + EnforceSSL *bool `json:"enforceSSL,omitempty"` + Port *int64 `json:"port,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MongoDbConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MongoDbConnectionInfo{} + +func (s MongoDbConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MongoDbConnectionInfo + wrapped := 
wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbConnectionInfo: %+v", err) + } + + decoded["type"] = "mongoDbConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbdatabaseinfo.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbdatabaseinfo.go new file mode 100644 index 00000000000..5911226b8c2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbdatabaseinfo.go @@ -0,0 +1,14 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbDatabaseInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + Collections []MongoDbCollectionInfo `json:"collections"` + DataSize int64 `json:"dataSize"` + DocumentCount int64 `json:"documentCount"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + SupportsSharding bool `json:"supportsSharding"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbdatabaseprogress.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbdatabaseprogress.go new file mode 100644 index 00000000000..2757fa8fe4e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbdatabaseprogress.go @@ -0,0 +1,166 @@ +package put + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information.
+
+var _ MongoDbProgress = MongoDbDatabaseProgress{}
+
+type MongoDbDatabaseProgress struct {
+	Collections *map[string]MongoDbProgress `json:"collections,omitempty"`
+
+	// Fields inherited from MongoDbProgress
+
+	BytesCopied int64 `json:"bytesCopied"`
+	DocumentsCopied int64 `json:"documentsCopied"`
+	ElapsedTime string `json:"elapsedTime"`
+	Errors map[string]MongoDbError `json:"errors"`
+	EventsPending int64 `json:"eventsPending"`
+	EventsReplayed int64 `json:"eventsReplayed"`
+	LastEventTime *string `json:"lastEventTime,omitempty"`
+	LastReplayTime *string `json:"lastReplayTime,omitempty"`
+	Name *string `json:"name,omitempty"`
+	QualifiedName *string `json:"qualifiedName,omitempty"`
+	ResultType ResultType `json:"resultType"`
+	State MongoDbMigrationState `json:"state"`
+	TotalBytes int64 `json:"totalBytes"`
+	TotalDocuments int64 `json:"totalDocuments"`
+}
+
+func (s MongoDbDatabaseProgress) MongoDbProgress() BaseMongoDbProgressImpl {
+	return BaseMongoDbProgressImpl{
+		BytesCopied: s.BytesCopied,
+		DocumentsCopied: s.DocumentsCopied,
+		ElapsedTime: s.ElapsedTime,
+		Errors: s.Errors,
+		EventsPending: s.EventsPending,
+		EventsReplayed: s.EventsReplayed,
+		LastEventTime: s.LastEventTime,
+		LastReplayTime: s.LastReplayTime,
+		Name: s.Name,
+		QualifiedName: s.QualifiedName,
+		ResultType: s.ResultType,
+		State: s.State,
+		TotalBytes: s.TotalBytes,
+		TotalDocuments: s.TotalDocuments,
+	}
+}
+
+func (o *MongoDbDatabaseProgress) GetLastEventTimeAsTime() (*time.Time, error) {
+	if o.LastEventTime == nil {
+		return nil, nil
+	}
+	return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00")
+}
+
+func (o *MongoDbDatabaseProgress) SetLastEventTimeAsTime(input time.Time) {
+	formatted := input.Format("2006-01-02T15:04:05Z07:00")
+	o.LastEventTime = &formatted
+}
+
+func (o *MongoDbDatabaseProgress) GetLastReplayTimeAsTime() (*time.Time, error) {
+	if o.LastReplayTime == nil {
+		return nil, nil
+	}
+	return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00")
+}
+
+func (o *MongoDbDatabaseProgress) SetLastReplayTimeAsTime(input time.Time) {
+	formatted := input.Format("2006-01-02T15:04:05Z07:00")
+	o.LastReplayTime = &formatted
+}
+
+var _ json.Marshaler = MongoDbDatabaseProgress{}
+
+func (s MongoDbDatabaseProgress) MarshalJSON() ([]byte, error) {
+	type wrapper MongoDbDatabaseProgress
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MongoDbDatabaseProgress: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MongoDbDatabaseProgress: %+v", err)
+	}
+
+	decoded["resultType"] = "Database"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MongoDbDatabaseProgress: %+v", err)
+	}
+
+	return encoded, nil
+}
+
+var _ json.Unmarshaler = &MongoDbDatabaseProgress{}
+
+func (s *MongoDbDatabaseProgress) UnmarshalJSON(bytes []byte) error {
+	var decoded struct {
+		BytesCopied int64 `json:"bytesCopied"`
+		DocumentsCopied int64 `json:"documentsCopied"`
+		ElapsedTime string `json:"elapsedTime"`
+		Errors map[string]MongoDbError `json:"errors"`
+		EventsPending int64 `json:"eventsPending"`
+		EventsReplayed int64 `json:"eventsReplayed"`
+		LastEventTime *string `json:"lastEventTime,omitempty"`
+		LastReplayTime *string `json:"lastReplayTime,omitempty"`
+		Name *string `json:"name,omitempty"`
+		QualifiedName *string `json:"qualifiedName,omitempty"`
+		ResultType ResultType `json:"resultType"`
+		State MongoDbMigrationState `json:"state"`
+		TotalBytes int64 `json:"totalBytes"`
+		TotalDocuments int64 `json:"totalDocuments"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.BytesCopied = decoded.BytesCopied
+	s.DocumentsCopied = decoded.DocumentsCopied
+	s.ElapsedTime = decoded.ElapsedTime
+	s.Errors = decoded.Errors
+	s.EventsPending = decoded.EventsPending
+	s.EventsReplayed = decoded.EventsReplayed
+	s.LastEventTime = decoded.LastEventTime
+	s.LastReplayTime = decoded.LastReplayTime
+	s.Name = decoded.Name
+	s.QualifiedName = decoded.QualifiedName
+	s.ResultType = decoded.ResultType
+	s.State = decoded.State
+	s.TotalBytes = decoded.TotalBytes
+	s.TotalDocuments = decoded.TotalDocuments
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling MongoDbDatabaseProgress into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["collections"]; ok {
+		var dictionaryTemp map[string]json.RawMessage
+		if err := json.Unmarshal(v, &dictionaryTemp); err != nil {
+			return fmt.Errorf("unmarshaling Collections into dictionary map[string]json.RawMessage: %+v", err)
+		}
+
+		output := make(map[string]MongoDbProgress)
+		for key, val := range dictionaryTemp {
+			impl, err := UnmarshalMongoDbProgressImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling key %q field 'Collections' for 'MongoDbDatabaseProgress': %+v", key, err)
+			}
+			output[key] = impl
+		}
+		s.Collections = &output
+	}
+
+	return nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbdatabasesettings.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbdatabasesettings.go
new file mode 100644
index 00000000000..748c7f41bce
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbdatabasesettings.go
@@ -0,0 +1,9 @@
+package put
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type MongoDbDatabaseSettings struct {
+	Collections map[string]MongoDbCollectionSettings `json:"collections"`
+	TargetRUs *int64 `json:"targetRUs,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodberror.go b/resource-manager/datamigration/2025-06-30/put/model_mongodberror.go
new file mode 100644
index 00000000000..cf43ae1f456
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_mongodberror.go
@@ -0,0 +1,11 @@
+package put
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type MongoDbError struct {
+	Code *string `json:"code,omitempty"`
+	Count *int64 `json:"count,omitempty"`
+	Message *string `json:"message,omitempty"`
+	Type *MongoDbErrorType `json:"type,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbmigrationprogress.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbmigrationprogress.go
new file mode 100644
index 00000000000..310f63f0163
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbmigrationprogress.go
@@ -0,0 +1,103 @@
+package put
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/go-azure-helpers/lang/dates"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ MongoDbProgress = MongoDbMigrationProgress{}
+
+type MongoDbMigrationProgress struct {
+	Databases *map[string]MongoDbDatabaseProgress `json:"databases,omitempty"`
+
+	// Fields inherited from MongoDbProgress
+
+	BytesCopied int64 `json:"bytesCopied"`
+	DocumentsCopied int64 `json:"documentsCopied"`
+	ElapsedTime string `json:"elapsedTime"`
+	Errors map[string]MongoDbError `json:"errors"`
+	EventsPending int64 `json:"eventsPending"`
+	EventsReplayed int64 `json:"eventsReplayed"`
+	LastEventTime *string `json:"lastEventTime,omitempty"`
+	LastReplayTime *string `json:"lastReplayTime,omitempty"`
+	Name *string `json:"name,omitempty"`
+	QualifiedName *string `json:"qualifiedName,omitempty"`
+	ResultType ResultType `json:"resultType"`
+	State MongoDbMigrationState `json:"state"`
+	TotalBytes int64 `json:"totalBytes"`
+	TotalDocuments int64 `json:"totalDocuments"`
+}
+
+func (s MongoDbMigrationProgress) MongoDbProgress() BaseMongoDbProgressImpl {
+	return BaseMongoDbProgressImpl{
+		BytesCopied: s.BytesCopied,
+		DocumentsCopied: s.DocumentsCopied,
+		ElapsedTime: s.ElapsedTime,
+		Errors: s.Errors,
+		EventsPending: s.EventsPending,
+		EventsReplayed: s.EventsReplayed,
+		LastEventTime: s.LastEventTime,
+		LastReplayTime: s.LastReplayTime,
+		Name: s.Name,
+		QualifiedName: s.QualifiedName,
+		ResultType: s.ResultType,
+		State: s.State,
+		TotalBytes: s.TotalBytes,
+		TotalDocuments: s.TotalDocuments,
+	}
+}
+
+func (o *MongoDbMigrationProgress) GetLastEventTimeAsTime() (*time.Time, error) {
+	if o.LastEventTime == nil {
+		return nil, nil
+	}
+	return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00")
+}
+
+func (o *MongoDbMigrationProgress) SetLastEventTimeAsTime(input time.Time) {
+	formatted := input.Format("2006-01-02T15:04:05Z07:00")
+	o.LastEventTime = &formatted
+}
+
+func (o *MongoDbMigrationProgress) GetLastReplayTimeAsTime() (*time.Time, error) {
+	if o.LastReplayTime == nil {
+		return nil, nil
+	}
+	return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00")
+}
+
+func (o *MongoDbMigrationProgress) SetLastReplayTimeAsTime(input time.Time) {
+	formatted := input.Format("2006-01-02T15:04:05Z07:00")
+	o.LastReplayTime = &formatted
+}
+
+var _ json.Marshaler = MongoDbMigrationProgress{}
+
+func (s MongoDbMigrationProgress) MarshalJSON() ([]byte, error) {
+	type wrapper MongoDbMigrationProgress
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MongoDbMigrationProgress: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MongoDbMigrationProgress: %+v", err)
+	}
+
+	decoded["resultType"] = "Migration"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MongoDbMigrationProgress: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbmigrationsettings.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbmigrationsettings.go
new file mode 100644
index 00000000000..053239b8266
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbmigrationsettings.go
@@ -0,0 +1,13 @@
+package put
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type MongoDbMigrationSettings struct {
+	BoostRUs *int64 `json:"boostRUs,omitempty"`
+	Databases map[string]MongoDbDatabaseSettings `json:"databases"`
+	Replication *MongoDbReplication `json:"replication,omitempty"`
+	Source MongoDbConnectionInfo `json:"source"`
+	Target MongoDbConnectionInfo `json:"target"`
+	Throttling *MongoDbThrottlingSettings `json:"throttling,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbprogress.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbprogress.go
new file mode 100644
index 00000000000..cdf5a7db105
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbprogress.go
@@ -0,0 +1,104 @@
+package put
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type MongoDbProgress interface {
+	MongoDbProgress() BaseMongoDbProgressImpl
+}
+
+var _ MongoDbProgress = BaseMongoDbProgressImpl{}
+
+type BaseMongoDbProgressImpl struct {
+	BytesCopied int64 `json:"bytesCopied"`
+	DocumentsCopied int64 `json:"documentsCopied"`
+	ElapsedTime string `json:"elapsedTime"`
+	Errors map[string]MongoDbError `json:"errors"`
+	EventsPending int64 `json:"eventsPending"`
+	EventsReplayed int64 `json:"eventsReplayed"`
+	LastEventTime *string `json:"lastEventTime,omitempty"`
+	LastReplayTime *string `json:"lastReplayTime,omitempty"`
+	Name *string `json:"name,omitempty"`
+	QualifiedName *string `json:"qualifiedName,omitempty"`
+	ResultType ResultType `json:"resultType"`
+	State MongoDbMigrationState `json:"state"`
+	TotalBytes int64 `json:"totalBytes"`
+	TotalDocuments int64 `json:"totalDocuments"`
+}
+
+func (s BaseMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl {
+	return s
+}
+
+var _ MongoDbProgress = RawMongoDbProgressImpl{}
+
+// RawMongoDbProgressImpl is returned when the Discriminated Value doesn't match any of the defined types
+// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
+// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
+type RawMongoDbProgressImpl struct {
+	mongoDbProgress BaseMongoDbProgressImpl
+	Type string
+	Values map[string]interface{}
+}
+
+func (s RawMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl {
+	return s.mongoDbProgress
+}
+
+func UnmarshalMongoDbProgressImplementation(input []byte) (MongoDbProgress, error) {
+	if input == nil {
+		return nil, nil
+	}
+
+	var temp map[string]interface{}
+	if err := json.Unmarshal(input, &temp); err != nil {
+		return nil, fmt.Errorf("unmarshaling MongoDbProgress into map[string]interface: %+v", err)
+	}
+
+	var value string
+	if v, ok := temp["resultType"]; ok {
+		value = fmt.Sprintf("%v", v)
+	}
+
+	if strings.EqualFold(value, "Collection") {
+		var out MongoDbCollectionProgress
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MongoDbCollectionProgress: %+v", err)
+		}
+		return out, nil
+	}
+
+	if strings.EqualFold(value, "Database") {
+		var out MongoDbDatabaseProgress
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MongoDbDatabaseProgress: %+v", err)
+		}
+		return out, nil
+	}
+
+	if strings.EqualFold(value, "Migration") {
+		var out MongoDbMigrationProgress
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MongoDbMigrationProgress: %+v", err)
+		}
+		return out, nil
+	}
+
+	var parent BaseMongoDbProgressImpl
+	if err := json.Unmarshal(input, &parent); err != nil {
+		return nil, fmt.Errorf("unmarshaling into BaseMongoDbProgressImpl: %+v", err)
+	}
+
+	return RawMongoDbProgressImpl{
+		mongoDbProgress: parent,
+		Type: value,
+		Values: temp,
+	}, nil
+
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbshardkeyfield.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbshardkeyfield.go
new file mode 100644
index 00000000000..11a58520802
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbshardkeyfield.go
@@ -0,0 +1,9 @@
+package put
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type MongoDbShardKeyField struct {
+	Name string `json:"name"`
+	Order MongoDbShardKeyOrder `json:"order"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbshardkeyinfo.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbshardkeyinfo.go
new file mode 100644
index 00000000000..f1c9c454ec6
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbshardkeyinfo.go
@@ -0,0 +1,9 @@
+package put
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type MongoDbShardKeyInfo struct {
+	Fields []MongoDbShardKeyField `json:"fields"`
+	IsUnique bool `json:"isUnique"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbshardkeysetting.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbshardkeysetting.go
new file mode 100644
index 00000000000..af83ee4d71d
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbshardkeysetting.go
@@ -0,0 +1,9 @@
+package put
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type MongoDbShardKeySetting struct {
+	Fields []MongoDbShardKeyField `json:"fields"`
+	IsUnique *bool `json:"isUnique,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_mongodbthrottlingsettings.go b/resource-manager/datamigration/2025-06-30/put/model_mongodbthrottlingsettings.go
new file mode 100644
index 00000000000..0b7731ec4cc
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_mongodbthrottlingsettings.go
@@ -0,0 +1,10 @@
+package put
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type MongoDbThrottlingSettings struct {
+	MaxParallelism *int64 `json:"maxParallelism,omitempty"`
+	MinFreeCPU *int64 `json:"minFreeCpu,omitempty"`
+	MinFreeMemoryMb *int64 `json:"minFreeMemoryMb,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_mysqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/put/model_mysqlconnectioninfo.go
new file mode 100644
index 00000000000..908eef463df
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_mysqlconnectioninfo.go
@@ -0,0 +1,59 @@
+package put
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ ConnectionInfo = MySqlConnectionInfo{}
+
+type MySqlConnectionInfo struct {
+	AdditionalSettings *string `json:"additionalSettings,omitempty"`
+	Authentication *AuthenticationType `json:"authentication,omitempty"`
+	DataSource *string `json:"dataSource,omitempty"`
+	EncryptConnection *bool `json:"encryptConnection,omitempty"`
+	Port int64 `json:"port"`
+	ServerName string `json:"serverName"`
+
+	// Fields inherited from ConnectionInfo
+
+	Password *string `json:"password,omitempty"`
+	Type string `json:"type"`
+	UserName *string `json:"userName,omitempty"`
+}
+
+func (s MySqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
+	return BaseConnectionInfoImpl{
+		Password: s.Password,
+		Type: s.Type,
+		UserName: s.UserName,
+	}
+}
+
+var _ json.Marshaler = MySqlConnectionInfo{}
+
+func (s MySqlConnectionInfo) MarshalJSON() ([]byte, error) {
+	type wrapper MySqlConnectionInfo
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MySqlConnectionInfo: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MySqlConnectionInfo: %+v", err)
+	}
+
+	decoded["type"] = "MySqlConnectionInfo"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MySqlConnectionInfo: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_odataerror.go b/resource-manager/datamigration/2025-06-30/put/model_odataerror.go
new file mode 100644
index 00000000000..01afc02bf42
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_odataerror.go
@@ -0,0 +1,10 @@
+package put
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type ODataError struct {
+	Code *string `json:"code,omitempty"`
+	Details *[]ODataError `json:"details,omitempty"`
+	Message *string `json:"message,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_oracleconnectioninfo.go b/resource-manager/datamigration/2025-06-30/put/model_oracleconnectioninfo.go
new file mode 100644
index 00000000000..e045b63cf5f
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_oracleconnectioninfo.go
@@ -0,0 +1,58 @@
+package put
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ ConnectionInfo = OracleConnectionInfo{}
+
+type OracleConnectionInfo struct {
+	Authentication *AuthenticationType `json:"authentication,omitempty"`
+	DataSource string `json:"dataSource"`
+	Port *int64 `json:"port,omitempty"`
+	ServerName *string `json:"serverName,omitempty"`
+	ServerVersion *string `json:"serverVersion,omitempty"`
+
+	// Fields inherited from ConnectionInfo
+
+	Password *string `json:"password,omitempty"`
+	Type string `json:"type"`
+	UserName *string `json:"userName,omitempty"`
+}
+
+func (s OracleConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
+	return BaseConnectionInfoImpl{
+		Password: s.Password,
+		Type: s.Type,
+		UserName: s.UserName,
+	}
+}
+
+var _ json.Marshaler = OracleConnectionInfo{}
+
+func (s OracleConnectionInfo) MarshalJSON() ([]byte, error) {
+	type wrapper OracleConnectionInfo
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling OracleConnectionInfo: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling OracleConnectionInfo: %+v", err)
+	}
+
+	decoded["type"] = "OracleConnectionInfo"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling OracleConnectionInfo: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_orphaneduserinfo.go b/resource-manager/datamigration/2025-06-30/put/model_orphaneduserinfo.go
new file mode 100644
index 00000000000..3b94f23b0cc
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_orphaneduserinfo.go
@@ -0,0 +1,9 @@
+package put
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type OrphanedUserInfo struct {
+	DatabaseName *string `json:"databaseName,omitempty"`
+	Name *string `json:"name,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_postgresqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/put/model_postgresqlconnectioninfo.go
new file mode 100644
index 00000000000..90d9a3775f2
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_postgresqlconnectioninfo.go
@@ -0,0 +1,63 @@
+package put
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ ConnectionInfo = PostgreSqlConnectionInfo{}
+
+type PostgreSqlConnectionInfo struct {
+	AdditionalSettings *string `json:"additionalSettings,omitempty"`
+	Authentication *AuthenticationType `json:"authentication,omitempty"`
+	DataSource *string `json:"dataSource,omitempty"`
+	DatabaseName *string `json:"databaseName,omitempty"`
+	EncryptConnection *bool `json:"encryptConnection,omitempty"`
+	Port int64 `json:"port"`
+	ServerBrandVersion *string `json:"serverBrandVersion,omitempty"`
+	ServerName string `json:"serverName"`
+	ServerVersion *string `json:"serverVersion,omitempty"`
+	TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"`
+
+	// Fields inherited from ConnectionInfo
+
+	Password *string `json:"password,omitempty"`
+	Type string `json:"type"`
+	UserName *string `json:"userName,omitempty"`
+}
+
+func (s PostgreSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
+	return BaseConnectionInfoImpl{
+		Password: s.Password,
+		Type: s.Type,
+		UserName: s.UserName,
+	}
+}
+
+var _ json.Marshaler = PostgreSqlConnectionInfo{}
+
+func (s PostgreSqlConnectionInfo) MarshalJSON() ([]byte, error) {
+	type wrapper PostgreSqlConnectionInfo
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling PostgreSqlConnectionInfo: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling PostgreSqlConnectionInfo: %+v", err)
+	}
+
+	decoded["type"] = "PostgreSqlConnectionInfo"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling PostgreSqlConnectionInfo: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_project.go b/resource-manager/datamigration/2025-06-30/put/model_project.go
new file mode 100644
index 00000000000..e4bdb34bf05
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_project.go
@@ -0,0 +1,19 @@
+package put
+
+import (
+	"github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type Project struct {
+	Etag *string `json:"etag,omitempty"`
+	Id *string `json:"id,omitempty"`
+	Location *string `json:"location,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Properties *ProjectProperties `json:"properties,omitempty"`
+	SystemData *systemdata.SystemData `json:"systemData,omitempty"`
+	Tags *map[string]string `json:"tags,omitempty"`
+	Type *string `json:"type,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_projectfile.go b/resource-manager/datamigration/2025-06-30/put/model_projectfile.go
new file mode 100644
index 00000000000..0964826b74a
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_projectfile.go
@@ -0,0 +1,17 @@
+package put
+
+import (
+	"github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type ProjectFile struct {
+	Etag *string `json:"etag,omitempty"`
+	Id *string `json:"id,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Properties *ProjectFileProperties `json:"properties,omitempty"`
+	SystemData *systemdata.SystemData `json:"systemData,omitempty"`
+	Type *string `json:"type,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_projectfileproperties.go b/resource-manager/datamigration/2025-06-30/put/model_projectfileproperties.go
new file mode 100644
index 00000000000..146b39fd1f0
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_projectfileproperties.go
@@ -0,0 +1,30 @@
+package put
+
+import (
+	"time"
+
+	"github.com/hashicorp/go-azure-helpers/lang/dates"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type ProjectFileProperties struct {
+	Extension *string `json:"extension,omitempty"`
+	FilePath *string `json:"filePath,omitempty"`
+	LastModified *string `json:"lastModified,omitempty"`
+	MediaType *string `json:"mediaType,omitempty"`
+	Size *int64 `json:"size,omitempty"`
+}
+
+func (o *ProjectFileProperties) GetLastModifiedAsTime() (*time.Time, error) {
+	if o.LastModified == nil {
+		return nil, nil
+	}
+	return dates.ParseAsFormat(o.LastModified, "2006-01-02T15:04:05Z07:00")
+}
+
+func (o *ProjectFileProperties) SetLastModifiedAsTime(input time.Time) {
+	formatted := input.Format("2006-01-02T15:04:05Z07:00")
+	o.LastModified = &formatted
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_projectproperties.go b/resource-manager/datamigration/2025-06-30/put/model_projectproperties.go
new file mode 100644
index 00000000000..cf91396ba6e
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_projectproperties.go
@@ -0,0 +1,81 @@
+package put
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/go-azure-helpers/lang/dates"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type ProjectProperties struct {
+	AzureAuthenticationInfo *AzureActiveDirectoryApp `json:"azureAuthenticationInfo,omitempty"`
+	CreationTime *string `json:"creationTime,omitempty"`
+	DatabasesInfo *[]DatabaseInfo `json:"databasesInfo,omitempty"`
+	ProvisioningState *ProjectProvisioningState `json:"provisioningState,omitempty"`
+	SourceConnectionInfo ConnectionInfo `json:"sourceConnectionInfo"`
+	SourcePlatform ProjectSourcePlatform `json:"sourcePlatform"`
+	TargetConnectionInfo ConnectionInfo `json:"targetConnectionInfo"`
+	TargetPlatform ProjectTargetPlatform `json:"targetPlatform"`
+}
+
+func (o *ProjectProperties) GetCreationTimeAsTime() (*time.Time, error) {
+	if o.CreationTime == nil {
+		return nil, nil
+	}
+	return dates.ParseAsFormat(o.CreationTime, "2006-01-02T15:04:05Z07:00")
+}
+
+func (o *ProjectProperties) SetCreationTimeAsTime(input time.Time) {
+	formatted := input.Format("2006-01-02T15:04:05Z07:00")
+	o.CreationTime = &formatted
+}
+
+var _ json.Unmarshaler = &ProjectProperties{}
+
+func (s *ProjectProperties) UnmarshalJSON(bytes []byte) error {
+	var decoded struct {
+		AzureAuthenticationInfo *AzureActiveDirectoryApp `json:"azureAuthenticationInfo,omitempty"`
+		CreationTime *string `json:"creationTime,omitempty"`
+		DatabasesInfo *[]DatabaseInfo `json:"databasesInfo,omitempty"`
+		ProvisioningState *ProjectProvisioningState `json:"provisioningState,omitempty"`
+		SourcePlatform ProjectSourcePlatform `json:"sourcePlatform"`
+		TargetPlatform ProjectTargetPlatform `json:"targetPlatform"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.AzureAuthenticationInfo = decoded.AzureAuthenticationInfo
+	s.CreationTime = decoded.CreationTime
+	s.DatabasesInfo = decoded.DatabasesInfo
+	s.ProvisioningState = decoded.ProvisioningState
+	s.SourcePlatform = decoded.SourcePlatform
+	s.TargetPlatform = decoded.TargetPlatform
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling ProjectProperties into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["sourceConnectionInfo"]; ok {
+		impl, err := UnmarshalConnectionInfoImplementation(v)
+		if err != nil {
+			return fmt.Errorf("unmarshaling field 'SourceConnectionInfo' for 'ProjectProperties': %+v", err)
+		}
+		s.SourceConnectionInfo = impl
+	}
+
+	if v, ok := temp["targetConnectionInfo"]; ok {
+		impl, err := UnmarshalConnectionInfoImplementation(v)
+		if err != nil {
+			return fmt.Errorf("unmarshaling field 'TargetConnectionInfo' for 'ProjectProperties': %+v", err)
+		}
+		s.TargetConnectionInfo = impl
+	}
+
+	return nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_projecttask.go b/resource-manager/datamigration/2025-06-30/put/model_projecttask.go
new file mode 100644
index 00000000000..0d753598343
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_projecttask.go
@@ -0,0 +1,56 @@
+package put
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type ProjectTask struct {
+	Etag *string `json:"etag,omitempty"`
+	Id *string `json:"id,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Properties ProjectTaskProperties `json:"properties"`
+	SystemData *systemdata.SystemData `json:"systemData,omitempty"`
+	Type *string `json:"type,omitempty"`
+}
+
+var _ json.Unmarshaler = &ProjectTask{}
+
+func (s *ProjectTask) UnmarshalJSON(bytes []byte) error {
+	var decoded struct {
+		Etag *string `json:"etag,omitempty"`
+		Id *string `json:"id,omitempty"`
+		Name *string `json:"name,omitempty"`
+		SystemData *systemdata.SystemData `json:"systemData,omitempty"`
+		Type *string `json:"type,omitempty"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.Etag = decoded.Etag
+	s.Id = decoded.Id
+	s.Name = decoded.Name
+	s.SystemData = decoded.SystemData
+	s.Type = decoded.Type
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling ProjectTask into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["properties"]; ok {
+		impl, err := UnmarshalProjectTaskPropertiesImplementation(v)
+		if err != nil {
+			return fmt.Errorf("unmarshaling field 'Properties' for 'ProjectTask': %+v", err)
+		}
+		s.Properties = impl
+	}
+
+	return nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/put/model_projecttaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_projecttaskproperties.go
new file mode 100644
index 00000000000..04a77df5f80
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/put/model_projecttaskproperties.go
@@ -0,0 +1,386 @@
+package put
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +type ProjectTaskProperties interface { + ProjectTaskProperties() BaseProjectTaskPropertiesImpl +} + +var _ ProjectTaskProperties = BaseProjectTaskPropertiesImpl{} + +type BaseProjectTaskPropertiesImpl struct { + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s BaseProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return s +} + +var _ ProjectTaskProperties = RawProjectTaskPropertiesImpl{} + +// RawProjectTaskPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawProjectTaskPropertiesImpl struct { + projectTaskProperties BaseProjectTaskPropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return s.projectTaskProperties +} + +var _ json.Unmarshaler = &BaseProjectTaskPropertiesImpl{} + +func (s *BaseProjectTaskPropertiesImpl) UnmarshalJSON(bytes []byte) error { + var decoded struct { + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling BaseProjectTaskPropertiesImpl into 
map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'BaseProjectTaskPropertiesImpl': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} + +func UnmarshalProjectTaskPropertiesImplementation(input []byte) (ProjectTaskProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ProjectTaskProperties into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["taskType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Connect.MongoDb") { + var out ConnectToMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.MySql") { + var out ConnectToSourceMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.Oracle.Sync") { + var out ConnectToSourceOracleSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.PostgreSql.Sync") { + var out ConnectToSourcePostgreSqlSyncTaskProperties + if err := 
json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.SqlServer.Sync") { + var out ConnectToSourceSqlServerSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.SqlServer") { + var out ConnectToSourceSqlServerTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureDbForMySql") { + var out ConnectToTargetAzureDbForMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureDbForPostgreSql.Sync") { + var out ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync") { + var out ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.SqlDb") { + var out ConnectToTargetSqlDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlDbTaskProperties: %+v", err) + } + 
return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI.Sync.LRS") { + var out ConnectToTargetSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI") { + var out ConnectToTargetSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.SqlDb.Sync") { + var out ConnectToTargetSqlSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetTDECertificates.Sql") { + var out GetTdeCertificatesSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetTdeCertificatesSqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesMySql") { + var out GetUserTablesMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesOracle") { + var out GetUserTablesOracleTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesOracleTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesPostgreSql") { + var out GetUserTablesPostgreSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + return out, nil + } + + 
if strings.EqualFold(value, "GetUserTables.AzureSqlDb.Sync") { + var out GetUserTablesSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTables.Sql") { + var out GetUserTablesSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MongoDb") { + var out MigrateMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql.Sync") { + var out MigrateMySqlAzureDbForMySqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Oracle.AzureDbForPostgreSql.Sync") { + var out MigrateOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDb.Sync") { + var out MigrateSqlServerSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.SqlDb") { + var out MigrateSqlServerSqlDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS") { + var out MigrateSqlServerSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI") { + var out MigrateSqlServerSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Ssis") { + var out MigrateSsisTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.SqlDb.Sync") { + var out ValidateMigrationInputSqlServerSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS") { + var out 
ValidateMigrationInputSqlServerSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI") { + var out ValidateMigrationInputSqlServerSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Validate.MongoDb") { + var out ValidateMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Validate.Oracle.AzureDbPostgreSql.Sync") { + var out ValidateOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + var parent BaseProjectTaskPropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseProjectTaskPropertiesImpl: %+v", err) + } + + return RawProjectTaskPropertiesImpl{ + projectTaskProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_queryanalysisvalidationresult.go b/resource-manager/datamigration/2025-06-30/put/model_queryanalysisvalidationresult.go new file mode 100644 index 00000000000..e790f5275ff --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_queryanalysisvalidationresult.go @@ -0,0 +1,9 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
// QueryAnalysisValidationResult pairs a query-execution result with any
// validation errors reported for it. All fields are optional pointers and are
// omitted from the encoded JSON when nil.
type QueryAnalysisValidationResult struct {
	QueryResults     *QueryExecutionResult `json:"queryResults,omitempty"`
	ValidationErrors *ValidationError      `json:"validationErrors,omitempty"`
}

// QueryExecutionResult describes one executed query: its text, how many
// statements the batch contained, and per-side execution statistics
// (presumably source vs. target of the migration — confirm against the API).
type QueryExecutionResult struct {
	QueryText         *string              `json:"queryText,omitempty"`
	SourceResult      *ExecutionStatistics `json:"sourceResult,omitempty"`
	StatementsInBatch *int64               `json:"statementsInBatch,omitempty"`
	TargetResult      *ExecutionStatistics `json:"targetResult,omitempty"`
}
// ReportableException is the service's serialized exception record: message,
// stack trace and source location details. All fields are optional and
// omitted when nil.
type ReportableException struct {
	ActionableMessage *string `json:"actionableMessage,omitempty"`
	FilePath          *string `json:"filePath,omitempty"`
	HResult           *int64  `json:"hResult,omitempty"`
	// NOTE(review): lineNumber is a string in the wire format, not an integer.
	LineNumber *string `json:"lineNumber,omitempty"`
	Message    *string `json:"message,omitempty"`
	StackTrace *string `json:"stackTrace,omitempty"`
}

// SchemaComparisonValidationResult reports the outcome of comparing schemas,
// including per-object-type counts on each side and any differences found.
type SchemaComparisonValidationResult struct {
	SchemaDifferences *SchemaComparisonValidationResultType `json:"schemaDifferences,omitempty"`
	// Keys are object-type names mapped to counts — TODO confirm against the API.
	SourceDatabaseObjectCount *map[string]int64 `json:"sourceDatabaseObjectCount,omitempty"`
	TargetDatabaseObjectCount *map[string]int64 `json:"targetDatabaseObjectCount,omitempty"`
	ValidationErrors          *ValidationError  `json:"validationErrors,omitempty"`
}
// SchemaComparisonValidationResultType identifies a single differing database
// object and the action that would reconcile it.
type SchemaComparisonValidationResultType struct {
	ObjectName   *string           `json:"objectName,omitempty"`
	ObjectType   *ObjectType       `json:"objectType,omitempty"`
	UpdateAction *UpdateActionType `json:"updateAction,omitempty"`
}

// SelectedCertificateInput names a certificate and the password protecting it.
// Both fields are required by the API (no omitempty, non-pointer).
type SelectedCertificateInput struct {
	CertificateName string `json:"certificateName"`
	Password        string `json:"password"`
}
// ServerProperties describes a database server: edition, version, platform,
// OS version and how many databases it hosts. All fields are optional and
// omitted from JSON when nil.
type ServerProperties struct {
	ServerDatabaseCount          *int64  `json:"serverDatabaseCount,omitempty"`
	ServerEdition                *string `json:"serverEdition,omitempty"`
	ServerName                   *string `json:"serverName,omitempty"`
	ServerOperatingSystemVersion *string `json:"serverOperatingSystemVersion,omitempty"`
	ServerPlatform               *string `json:"serverPlatform,omitempty"`
	ServerVersion                *string `json:"serverVersion,omitempty"`
}

// ServiceSku is the standard ARM SKU shape (name/tier/family/size/capacity)
// for the migration service. All fields are optional.
type ServiceSku struct {
	Capacity *int64  `json:"capacity,omitempty"`
	Family   *string `json:"family,omitempty"`
	Name     *string `json:"name,omitempty"`
	Size     *string `json:"size,omitempty"`
	Tier     *string `json:"tier,omitempty"`
}
+ +var _ ConnectionInfo = SqlConnectionInfo{} + +type SqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource string `json:"dataSource"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Platform *SqlSourcePlatform `json:"platform,omitempty"` + Port *int64 `json:"port,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s SqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = SqlConnectionInfo{} + +func (s SqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper SqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling SqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling SqlConnectionInfo: %+v", err) + } + + decoded["type"] = "SqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling SqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_sqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_sqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..577d1831074 --- /dev/null +++ 
// SqlServerSqlMISyncTaskInput is the input for SQL Server → Azure SQL MI
// (sync/LRS) tasks. Non-pointer fields without omitempty are required by the
// API.
type SqlServerSqlMISyncTaskInput struct {
	AzureApp             AzureActiveDirectoryApp              `json:"azureApp"`
	BackupFileShare      *FileShare                           `json:"backupFileShare,omitempty"`
	SelectedDatabases    []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"`
	SourceConnectionInfo SqlConnectionInfo                    `json:"sourceConnectionInfo"`
	// ARM resource ID of the storage account used for backups — TODO confirm.
	StorageResourceId    string              `json:"storageResourceId"`
	TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"`
}

// SsisMigrationInfo holds SSIS migration settings: where the SSIS catalog is
// stored and how to handle overwrites of existing projects/environments.
type SsisMigrationInfo struct {
	EnvironmentOverwriteOption *SsisMigrationOverwriteOption `json:"environmentOverwriteOption,omitempty"`
	ProjectOverwriteOption     *SsisMigrationOverwriteOption `json:"projectOverwriteOption,omitempty"`
	SsisStoreType              *SsisStoreType                `json:"ssisStoreType,omitempty"`
}
// StartMigrationScenarioServerRoleResult reports the migration state of a
// single server role, plus any exceptions or warnings raised for it.
type StartMigrationScenarioServerRoleResult struct {
	ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"`
	Name                  *string                `json:"name,omitempty"`
	State                 *MigrationState        `json:"state,omitempty"`
}

// SyncMigrationDatabaseErrorEvent is an error event raised during a sync
// migration. Note the timestamp is transported as a string
// ("timestampString"), not a structured time value.
type SyncMigrationDatabaseErrorEvent struct {
	EventText       *string `json:"eventText,omitempty"`
	EventTypeString *string `json:"eventTypeString,omitempty"`
	TimestampString *string `json:"timestampString,omitempty"`
}
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlDbSyncTaskProperties struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_validatemigrationinputsqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_validatemigrationinputsqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..8a67c402b88 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_validatemigrationinputsqlserversqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft 
// ValidateMigrationInputSqlServerSqlMISyncTaskOutput is one validation result
// for a SQL Server → Azure SQL MI (sync) migration input.
type ValidateMigrationInputSqlServerSqlMISyncTaskOutput struct {
	Id               *string                `json:"id,omitempty"`
	Name             *string                `json:"name,omitempty"`
	ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"`
}
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlMISyncTaskProperties struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMISyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_validatemigrationinputsqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_validatemigrationinputsqlserversqlmitaskinput.go new file mode 100644 index 00000000000..b3e7d238bef --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_validatemigrationinputsqlserversqlmitaskinput.go @@ -0,0 +1,14 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMITaskInput struct { + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_validatemigrationinputsqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_validatemigrationinputsqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..176982f4800 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_validatemigrationinputsqlserversqlmitaskoutput.go @@ -0,0 +1,15 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateMigrationInputSqlServerSqlMITaskOutput struct { + BackupFolderErrors *[]ReportableException `json:"backupFolderErrors,omitempty"` + BackupShareCredentialsErrors *[]ReportableException `json:"backupShareCredentialsErrors,omitempty"` + BackupStorageAccountErrors *[]ReportableException `json:"backupStorageAccountErrors,omitempty"` + DatabaseBackupInfo *DatabaseBackupInfo `json:"databaseBackupInfo,omitempty"` + ExistingBackupErrors *[]ReportableException `json:"existingBackupErrors,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + RestoreDatabaseNameErrors *[]ReportableException `json:"restoreDatabaseNameErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_validatemigrationinputsqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_validatemigrationinputsqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..2f24ef72729 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_validatemigrationinputsqlserversqlmitaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +type ValidateMigrationInputSqlServerSqlMITaskProperties struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var 
decoded struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_validatemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_validatemongodbtaskproperties.go new file mode 100644 index 00000000000..83197c14c56 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_validatemongodbtaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ValidateMongoDbTaskProperties{} + +type ValidateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMongoDbTaskProperties{} + +func (s ValidateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMongoDbTaskProperties{} + +func (s *ValidateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` 
+ State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_validateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/put/model_validateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..8ab857bb653 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_validateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package put + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ValidateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.Oracle.AzureDbPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { 
+ var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_validateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_validateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..56d0c082fb9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_validateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,8 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateOracleAzureDbPostgreSqlSyncTaskOutput struct { + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_validatesyncmigrationinputsqlservertaskinput.go b/resource-manager/datamigration/2025-06-30/put/model_validatesyncmigrationinputsqlservertaskinput.go new file mode 100644 index 00000000000..90fa1959f95 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_validatesyncmigrationinputsqlservertaskinput.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateSyncMigrationInputSqlServerTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_validatesyncmigrationinputsqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/put/model_validatesyncmigrationinputsqlservertaskoutput.go new file mode 100644 index 00000000000..221405ce846 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_validatesyncmigrationinputsqlservertaskoutput.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateSyncMigrationInputSqlServerTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_validationerror.go b/resource-manager/datamigration/2025-06-30/put/model_validationerror.go new file mode 100644 index 00000000000..488b77203ac --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_validationerror.go @@ -0,0 +1,9 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidationError struct { + Severity *Severity `json:"severity,omitempty"` + Text *string `json:"text,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/model_waitstatistics.go b/resource-manager/datamigration/2025-06-30/put/model_waitstatistics.go new file mode 100644 index 00000000000..18c38d1797a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/model_waitstatistics.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type WaitStatistics struct { + WaitCount *int64 `json:"waitCount,omitempty"` + WaitTimeMs *float64 `json:"waitTimeMs,omitempty"` + WaitType *string `json:"waitType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/put/version.go b/resource-manager/datamigration/2025-06-30/put/version.go new file mode 100644 index 00000000000..f0f5c666559 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/put/version.go @@ -0,0 +1,10 @@ +package put + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/put/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/README.md b/resource-manager/datamigration/2025-06-30/serviceresource/README.md new file mode 100644 index 00000000000..5e1b69dbf37 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/README.md @@ -0,0 +1,208 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/serviceresource` Documentation + +The `serviceresource` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/serviceresource" +``` + + +### Client Initialization + +```go +client := serviceresource.NewServiceResourceClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `ServiceResourceClient.ServiceTasksList` + +```go +ctx := context.TODO() +id := serviceresource.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +// alternatively `client.ServiceTasksList(ctx, id, serviceresource.DefaultServiceTasksListOperationOptions())` can be used to do batched pagination +items, err := client.ServiceTasksListComplete(ctx, id, serviceresource.DefaultServiceTasksListOperationOptions()) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ServiceResourceClient.ServicesCheckStatus` + +```go +ctx := context.TODO() +id := serviceresource.NewServiceID("12345678-1234-9876-4563-123456789012", 
"resourceGroupName", "serviceName") + +read, err := client.ServicesCheckStatus(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ServiceResourceClient.ServicesCreateOrUpdate` + +```go +ctx := context.TODO() +id := serviceresource.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +payload := serviceresource.DataMigrationService{ + // ... +} + + +if err := client.ServicesCreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `ServiceResourceClient.ServicesDelete` + +```go +ctx := context.TODO() +id := serviceresource.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +if err := client.ServicesDeleteThenPoll(ctx, id, serviceresource.DefaultServicesDeleteOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `ServiceResourceClient.ServicesGet` + +```go +ctx := context.TODO() +id := serviceresource.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +read, err := client.ServicesGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ServiceResourceClient.ServicesList` + +```go +ctx := context.TODO() +id := commonids.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.ServicesList(ctx, id)` can be used to do batched pagination +items, err := client.ServicesListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ServiceResourceClient.ServicesListByResourceGroup` + +```go +ctx := context.TODO() +id := serviceresource.NewSubscriptionResourceGroupID("12345678-1234-9876-4563-123456789012", "resourceGroupName") + 
+// alternatively `client.ServicesListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ServicesListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ServiceResourceClient.ServicesListSkus` + +```go +ctx := context.TODO() +id := serviceresource.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +// alternatively `client.ServicesListSkus(ctx, id)` can be used to do batched pagination +items, err := client.ServicesListSkusComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `ServiceResourceClient.ServicesStart` + +```go +ctx := context.TODO() +id := serviceresource.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +if err := client.ServicesStartThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `ServiceResourceClient.ServicesStop` + +```go +ctx := context.TODO() +id := serviceresource.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +if err := client.ServicesStopThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `ServiceResourceClient.ServicesUpdate` + +```go +ctx := context.TODO() +id := serviceresource.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +payload := serviceresource.DataMigrationService{ + // ... 
+} + + +if err := client.ServicesUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `ServiceResourceClient.TasksList` + +```go +ctx := context.TODO() +id := serviceresource.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +// alternatively `client.TasksList(ctx, id, serviceresource.DefaultTasksListOperationOptions())` can be used to do batched pagination +items, err := client.TasksListComplete(ctx, id, serviceresource.DefaultTasksListOperationOptions()) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/client.go b/resource-manager/datamigration/2025-06-30/serviceresource/client.go new file mode 100644 index 00000000000..60618461d07 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/client.go @@ -0,0 +1,26 @@ +package serviceresource + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServiceResourceClient struct { + Client *resourcemanager.Client +} + +func NewServiceResourceClientWithBaseURI(sdkApi sdkEnv.Api) (*ServiceResourceClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "serviceresource", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating ServiceResourceClient: %+v", err) + } + + return &ServiceResourceClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/constants.go b/resource-manager/datamigration/2025-06-30/serviceresource/constants.go new file mode 100644 index 00000000000..d9da792a5e5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/constants.go @@ -0,0 +1,2214 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AuthenticationType string + +const ( + AuthenticationTypeActiveDirectoryIntegrated AuthenticationType = "ActiveDirectoryIntegrated" + AuthenticationTypeActiveDirectoryPassword AuthenticationType = "ActiveDirectoryPassword" + AuthenticationTypeNone AuthenticationType = "None" + AuthenticationTypeSqlAuthentication AuthenticationType = "SqlAuthentication" + AuthenticationTypeWindowsAuthentication AuthenticationType = "WindowsAuthentication" +) + +func PossibleValuesForAuthenticationType() []string { + return []string{ + string(AuthenticationTypeActiveDirectoryIntegrated), + string(AuthenticationTypeActiveDirectoryPassword), + string(AuthenticationTypeNone), + string(AuthenticationTypeSqlAuthentication), + string(AuthenticationTypeWindowsAuthentication), + } +} + +func (s *AuthenticationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthenticationType(decoded) 
+ if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthenticationType(input string) (*AuthenticationType, error) { + vals := map[string]AuthenticationType{ + "activedirectoryintegrated": AuthenticationTypeActiveDirectoryIntegrated, + "activedirectorypassword": AuthenticationTypeActiveDirectoryPassword, + "none": AuthenticationTypeNone, + "sqlauthentication": AuthenticationTypeSqlAuthentication, + "windowsauthentication": AuthenticationTypeWindowsAuthentication, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthenticationType(input) + return &out, nil +} + +type BackupFileStatus string + +const ( + BackupFileStatusArrived BackupFileStatus = "Arrived" + BackupFileStatusCancelled BackupFileStatus = "Cancelled" + BackupFileStatusQueued BackupFileStatus = "Queued" + BackupFileStatusRestored BackupFileStatus = "Restored" + BackupFileStatusRestoring BackupFileStatus = "Restoring" + BackupFileStatusUploaded BackupFileStatus = "Uploaded" + BackupFileStatusUploading BackupFileStatus = "Uploading" +) + +func PossibleValuesForBackupFileStatus() []string { + return []string{ + string(BackupFileStatusArrived), + string(BackupFileStatusCancelled), + string(BackupFileStatusQueued), + string(BackupFileStatusRestored), + string(BackupFileStatusRestoring), + string(BackupFileStatusUploaded), + string(BackupFileStatusUploading), + } +} + +func (s *BackupFileStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupFileStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupFileStatus(input string) (*BackupFileStatus, error) { + vals := map[string]BackupFileStatus{ + "arrived": 
BackupFileStatusArrived, + "cancelled": BackupFileStatusCancelled, + "queued": BackupFileStatusQueued, + "restored": BackupFileStatusRestored, + "restoring": BackupFileStatusRestoring, + "uploaded": BackupFileStatusUploaded, + "uploading": BackupFileStatusUploading, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupFileStatus(input) + return &out, nil +} + +type BackupMode string + +const ( + BackupModeCreateBackup BackupMode = "CreateBackup" + BackupModeExistingBackup BackupMode = "ExistingBackup" +) + +func PossibleValuesForBackupMode() []string { + return []string{ + string(BackupModeCreateBackup), + string(BackupModeExistingBackup), + } +} + +func (s *BackupMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupMode(input string) (*BackupMode, error) { + vals := map[string]BackupMode{ + "createbackup": BackupModeCreateBackup, + "existingbackup": BackupModeExistingBackup, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupMode(input) + return &out, nil +} + +type BackupType string + +const ( + BackupTypeDatabase BackupType = "Database" + BackupTypeDifferentialDatabase BackupType = "DifferentialDatabase" + BackupTypeDifferentialFile BackupType = "DifferentialFile" + BackupTypeDifferentialPartial BackupType = "DifferentialPartial" + BackupTypeFile BackupType = "File" + BackupTypePartial BackupType = "Partial" + BackupTypeTransactionLog BackupType = "TransactionLog" +) + +func PossibleValuesForBackupType() []string { + return []string{ + string(BackupTypeDatabase), + 
string(BackupTypeDifferentialDatabase), + string(BackupTypeDifferentialFile), + string(BackupTypeDifferentialPartial), + string(BackupTypeFile), + string(BackupTypePartial), + string(BackupTypeTransactionLog), + } +} + +func (s *BackupType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupType(input string) (*BackupType, error) { + vals := map[string]BackupType{ + "database": BackupTypeDatabase, + "differentialdatabase": BackupTypeDifferentialDatabase, + "differentialfile": BackupTypeDifferentialFile, + "differentialpartial": BackupTypeDifferentialPartial, + "file": BackupTypeFile, + "partial": BackupTypePartial, + "transactionlog": BackupTypeTransactionLog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupType(input) + return &out, nil +} + +type CommandState string + +const ( + CommandStateAccepted CommandState = "Accepted" + CommandStateFailed CommandState = "Failed" + CommandStateRunning CommandState = "Running" + CommandStateSucceeded CommandState = "Succeeded" + CommandStateUnknown CommandState = "Unknown" +) + +func PossibleValuesForCommandState() []string { + return []string{ + string(CommandStateAccepted), + string(CommandStateFailed), + string(CommandStateRunning), + string(CommandStateSucceeded), + string(CommandStateUnknown), + } +} + +func (s *CommandState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func 
parseCommandState(input string) (*CommandState, error) { + vals := map[string]CommandState{ + "accepted": CommandStateAccepted, + "failed": CommandStateFailed, + "running": CommandStateRunning, + "succeeded": CommandStateSucceeded, + "unknown": CommandStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CommandState(input) + return &out, nil +} + +type CommandType string + +const ( + CommandTypeCancel CommandType = "cancel" + CommandTypeFinish CommandType = "finish" + CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete CommandType = "Migrate.SqlServer.AzureDbSqlMi.Complete" + CommandTypeMigratePointSyncPointCompletePointDatabase CommandType = "Migrate.Sync.Complete.Database" + CommandTypeRestart CommandType = "restart" +) + +func PossibleValuesForCommandType() []string { + return []string{ + string(CommandTypeCancel), + string(CommandTypeFinish), + string(CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete), + string(CommandTypeMigratePointSyncPointCompletePointDatabase), + string(CommandTypeRestart), + } +} + +func (s *CommandType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCommandType(input string) (*CommandType, error) { + vals := map[string]CommandType{ + "cancel": CommandTypeCancel, + "finish": CommandTypeFinish, + "migrate.sqlserver.azuredbsqlmi.complete": CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete, + "migrate.sync.complete.database": CommandTypeMigratePointSyncPointCompletePointDatabase, + "restart": CommandTypeRestart, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined 
value and best-effort it + out := CommandType(input) + return &out, nil +} + +type DatabaseCompatLevel string + +const ( + DatabaseCompatLevelCompatLevelEightZero DatabaseCompatLevel = "CompatLevel80" + DatabaseCompatLevelCompatLevelNineZero DatabaseCompatLevel = "CompatLevel90" + DatabaseCompatLevelCompatLevelOneFourZero DatabaseCompatLevel = "CompatLevel140" + DatabaseCompatLevelCompatLevelOneHundred DatabaseCompatLevel = "CompatLevel100" + DatabaseCompatLevelCompatLevelOneOneZero DatabaseCompatLevel = "CompatLevel110" + DatabaseCompatLevelCompatLevelOneThreeZero DatabaseCompatLevel = "CompatLevel130" + DatabaseCompatLevelCompatLevelOneTwoZero DatabaseCompatLevel = "CompatLevel120" +) + +func PossibleValuesForDatabaseCompatLevel() []string { + return []string{ + string(DatabaseCompatLevelCompatLevelEightZero), + string(DatabaseCompatLevelCompatLevelNineZero), + string(DatabaseCompatLevelCompatLevelOneFourZero), + string(DatabaseCompatLevelCompatLevelOneHundred), + string(DatabaseCompatLevelCompatLevelOneOneZero), + string(DatabaseCompatLevelCompatLevelOneThreeZero), + string(DatabaseCompatLevelCompatLevelOneTwoZero), + } +} + +func (s *DatabaseCompatLevel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseCompatLevel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseCompatLevel(input string) (*DatabaseCompatLevel, error) { + vals := map[string]DatabaseCompatLevel{ + "compatlevel80": DatabaseCompatLevelCompatLevelEightZero, + "compatlevel90": DatabaseCompatLevelCompatLevelNineZero, + "compatlevel140": DatabaseCompatLevelCompatLevelOneFourZero, + "compatlevel100": DatabaseCompatLevelCompatLevelOneHundred, + "compatlevel110": DatabaseCompatLevelCompatLevelOneOneZero, + "compatlevel130": DatabaseCompatLevelCompatLevelOneThreeZero, + 
"compatlevel120": DatabaseCompatLevelCompatLevelOneTwoZero, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseCompatLevel(input) + return &out, nil +} + +type DatabaseFileType string + +const ( + DatabaseFileTypeFilestream DatabaseFileType = "Filestream" + DatabaseFileTypeFulltext DatabaseFileType = "Fulltext" + DatabaseFileTypeLog DatabaseFileType = "Log" + DatabaseFileTypeNotSupported DatabaseFileType = "NotSupported" + DatabaseFileTypeRows DatabaseFileType = "Rows" +) + +func PossibleValuesForDatabaseFileType() []string { + return []string{ + string(DatabaseFileTypeFilestream), + string(DatabaseFileTypeFulltext), + string(DatabaseFileTypeLog), + string(DatabaseFileTypeNotSupported), + string(DatabaseFileTypeRows), + } +} + +func (s *DatabaseFileType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseFileType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseFileType(input string) (*DatabaseFileType, error) { + vals := map[string]DatabaseFileType{ + "filestream": DatabaseFileTypeFilestream, + "fulltext": DatabaseFileTypeFulltext, + "log": DatabaseFileTypeLog, + "notsupported": DatabaseFileTypeNotSupported, + "rows": DatabaseFileTypeRows, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseFileType(input) + return &out, nil +} + +type DatabaseMigrationStage string + +const ( + DatabaseMigrationStageBackup DatabaseMigrationStage = "Backup" + DatabaseMigrationStageCompleted DatabaseMigrationStage = "Completed" + DatabaseMigrationStageFileCopy DatabaseMigrationStage = "FileCopy" + DatabaseMigrationStageInitialize 
DatabaseMigrationStage = "Initialize" + DatabaseMigrationStageNone DatabaseMigrationStage = "None" + DatabaseMigrationStageRestore DatabaseMigrationStage = "Restore" +) + +func PossibleValuesForDatabaseMigrationStage() []string { + return []string{ + string(DatabaseMigrationStageBackup), + string(DatabaseMigrationStageCompleted), + string(DatabaseMigrationStageFileCopy), + string(DatabaseMigrationStageInitialize), + string(DatabaseMigrationStageNone), + string(DatabaseMigrationStageRestore), + } +} + +func (s *DatabaseMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationStage(input string) (*DatabaseMigrationStage, error) { + vals := map[string]DatabaseMigrationStage{ + "backup": DatabaseMigrationStageBackup, + "completed": DatabaseMigrationStageCompleted, + "filecopy": DatabaseMigrationStageFileCopy, + "initialize": DatabaseMigrationStageInitialize, + "none": DatabaseMigrationStageNone, + "restore": DatabaseMigrationStageRestore, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseMigrationStage(input) + return &out, nil +} + +type DatabaseMigrationState string + +const ( + DatabaseMigrationStateCANCELLED DatabaseMigrationState = "CANCELLED" + DatabaseMigrationStateCOMPLETED DatabaseMigrationState = "COMPLETED" + DatabaseMigrationStateCUTOVERSTART DatabaseMigrationState = "CUTOVER_START" + DatabaseMigrationStateFAILED DatabaseMigrationState = "FAILED" + DatabaseMigrationStateFULLBACKUPUPLOADSTART DatabaseMigrationState = "FULL_BACKUP_UPLOAD_START" + DatabaseMigrationStateINITIAL DatabaseMigrationState = "INITIAL" + DatabaseMigrationStateLOGSHIPPINGSTART 
DatabaseMigrationState = "LOG_SHIPPING_START" + DatabaseMigrationStatePOSTCUTOVERCOMPLETE DatabaseMigrationState = "POST_CUTOVER_COMPLETE" + DatabaseMigrationStateUNDEFINED DatabaseMigrationState = "UNDEFINED" + DatabaseMigrationStateUPLOADLOGFILESSTART DatabaseMigrationState = "UPLOAD_LOG_FILES_START" +) + +func PossibleValuesForDatabaseMigrationState() []string { + return []string{ + string(DatabaseMigrationStateCANCELLED), + string(DatabaseMigrationStateCOMPLETED), + string(DatabaseMigrationStateCUTOVERSTART), + string(DatabaseMigrationStateFAILED), + string(DatabaseMigrationStateFULLBACKUPUPLOADSTART), + string(DatabaseMigrationStateINITIAL), + string(DatabaseMigrationStateLOGSHIPPINGSTART), + string(DatabaseMigrationStatePOSTCUTOVERCOMPLETE), + string(DatabaseMigrationStateUNDEFINED), + string(DatabaseMigrationStateUPLOADLOGFILESSTART), + } +} + +func (s *DatabaseMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationState(input string) (*DatabaseMigrationState, error) { + vals := map[string]DatabaseMigrationState{ + "cancelled": DatabaseMigrationStateCANCELLED, + "completed": DatabaseMigrationStateCOMPLETED, + "cutover_start": DatabaseMigrationStateCUTOVERSTART, + "failed": DatabaseMigrationStateFAILED, + "full_backup_upload_start": DatabaseMigrationStateFULLBACKUPUPLOADSTART, + "initial": DatabaseMigrationStateINITIAL, + "log_shipping_start": DatabaseMigrationStateLOGSHIPPINGSTART, + "post_cutover_complete": DatabaseMigrationStatePOSTCUTOVERCOMPLETE, + "undefined": DatabaseMigrationStateUNDEFINED, + "upload_log_files_start": DatabaseMigrationStateUPLOADLOGFILESSTART, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // 
otherwise presume it's an undefined value and best-effort it + out := DatabaseMigrationState(input) + return &out, nil +} + +type DatabaseState string + +const ( + DatabaseStateCopying DatabaseState = "Copying" + DatabaseStateEmergency DatabaseState = "Emergency" + DatabaseStateOffline DatabaseState = "Offline" + DatabaseStateOfflineSecondary DatabaseState = "OfflineSecondary" + DatabaseStateOnline DatabaseState = "Online" + DatabaseStateRecovering DatabaseState = "Recovering" + DatabaseStateRecoveryPending DatabaseState = "RecoveryPending" + DatabaseStateRestoring DatabaseState = "Restoring" + DatabaseStateSuspect DatabaseState = "Suspect" +) + +func PossibleValuesForDatabaseState() []string { + return []string{ + string(DatabaseStateCopying), + string(DatabaseStateEmergency), + string(DatabaseStateOffline), + string(DatabaseStateOfflineSecondary), + string(DatabaseStateOnline), + string(DatabaseStateRecovering), + string(DatabaseStateRecoveryPending), + string(DatabaseStateRestoring), + string(DatabaseStateSuspect), + } +} + +func (s *DatabaseState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseState(input string) (*DatabaseState, error) { + vals := map[string]DatabaseState{ + "copying": DatabaseStateCopying, + "emergency": DatabaseStateEmergency, + "offline": DatabaseStateOffline, + "offlinesecondary": DatabaseStateOfflineSecondary, + "online": DatabaseStateOnline, + "recovering": DatabaseStateRecovering, + "recoverypending": DatabaseStateRecoveryPending, + "restoring": DatabaseStateRestoring, + "suspect": DatabaseStateSuspect, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := 
DatabaseState(input) + return &out, nil +} + +type LoginMigrationStage string + +const ( + LoginMigrationStageAssignRoleMembership LoginMigrationStage = "AssignRoleMembership" + LoginMigrationStageAssignRoleOwnership LoginMigrationStage = "AssignRoleOwnership" + LoginMigrationStageCompleted LoginMigrationStage = "Completed" + LoginMigrationStageEstablishObjectPermissions LoginMigrationStage = "EstablishObjectPermissions" + LoginMigrationStageEstablishServerPermissions LoginMigrationStage = "EstablishServerPermissions" + LoginMigrationStageEstablishUserMapping LoginMigrationStage = "EstablishUserMapping" + LoginMigrationStageInitialize LoginMigrationStage = "Initialize" + LoginMigrationStageLoginMigration LoginMigrationStage = "LoginMigration" + LoginMigrationStageNone LoginMigrationStage = "None" +) + +func PossibleValuesForLoginMigrationStage() []string { + return []string{ + string(LoginMigrationStageAssignRoleMembership), + string(LoginMigrationStageAssignRoleOwnership), + string(LoginMigrationStageCompleted), + string(LoginMigrationStageEstablishObjectPermissions), + string(LoginMigrationStageEstablishServerPermissions), + string(LoginMigrationStageEstablishUserMapping), + string(LoginMigrationStageInitialize), + string(LoginMigrationStageLoginMigration), + string(LoginMigrationStageNone), + } +} + +func (s *LoginMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLoginMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLoginMigrationStage(input string) (*LoginMigrationStage, error) { + vals := map[string]LoginMigrationStage{ + "assignrolemembership": LoginMigrationStageAssignRoleMembership, + "assignroleownership": LoginMigrationStageAssignRoleOwnership, + "completed": LoginMigrationStageCompleted, + 
"establishobjectpermissions": LoginMigrationStageEstablishObjectPermissions, + "establishserverpermissions": LoginMigrationStageEstablishServerPermissions, + "establishusermapping": LoginMigrationStageEstablishUserMapping, + "initialize": LoginMigrationStageInitialize, + "loginmigration": LoginMigrationStageLoginMigration, + "none": LoginMigrationStageNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LoginMigrationStage(input) + return &out, nil +} + +type LoginType string + +const ( + LoginTypeAsymmetricKey LoginType = "AsymmetricKey" + LoginTypeCertificate LoginType = "Certificate" + LoginTypeExternalGroup LoginType = "ExternalGroup" + LoginTypeExternalUser LoginType = "ExternalUser" + LoginTypeSqlLogin LoginType = "SqlLogin" + LoginTypeWindowsGroup LoginType = "WindowsGroup" + LoginTypeWindowsUser LoginType = "WindowsUser" +) + +func PossibleValuesForLoginType() []string { + return []string{ + string(LoginTypeAsymmetricKey), + string(LoginTypeCertificate), + string(LoginTypeExternalGroup), + string(LoginTypeExternalUser), + string(LoginTypeSqlLogin), + string(LoginTypeWindowsGroup), + string(LoginTypeWindowsUser), + } +} + +func (s *LoginType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLoginType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLoginType(input string) (*LoginType, error) { + vals := map[string]LoginType{ + "asymmetrickey": LoginTypeAsymmetricKey, + "certificate": LoginTypeCertificate, + "externalgroup": LoginTypeExternalGroup, + "externaluser": LoginTypeExternalUser, + "sqllogin": LoginTypeSqlLogin, + "windowsgroup": LoginTypeWindowsGroup, + "windowsuser": LoginTypeWindowsUser, + } + if v, ok := vals[strings.ToLower(input)]; ok 
{ + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LoginType(input) + return &out, nil +} + +type MigrationState string + +const ( + MigrationStateCompleted MigrationState = "Completed" + MigrationStateFailed MigrationState = "Failed" + MigrationStateInProgress MigrationState = "InProgress" + MigrationStateNone MigrationState = "None" + MigrationStateSkipped MigrationState = "Skipped" + MigrationStateStopped MigrationState = "Stopped" + MigrationStateWarning MigrationState = "Warning" +) + +func PossibleValuesForMigrationState() []string { + return []string{ + string(MigrationStateCompleted), + string(MigrationStateFailed), + string(MigrationStateInProgress), + string(MigrationStateNone), + string(MigrationStateSkipped), + string(MigrationStateStopped), + string(MigrationStateWarning), + } +} + +func (s *MigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMigrationState(input string) (*MigrationState, error) { + vals := map[string]MigrationState{ + "completed": MigrationStateCompleted, + "failed": MigrationStateFailed, + "inprogress": MigrationStateInProgress, + "none": MigrationStateNone, + "skipped": MigrationStateSkipped, + "stopped": MigrationStateStopped, + "warning": MigrationStateWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MigrationState(input) + return &out, nil +} + +type MigrationStatus string + +const ( + MigrationStatusCompleted MigrationStatus = "Completed" + MigrationStatusCompletedWithWarnings MigrationStatus = "CompletedWithWarnings" + MigrationStatusConfigured MigrationStatus = "Configured" + 
MigrationStatusConnecting MigrationStatus = "Connecting" + MigrationStatusDefault MigrationStatus = "Default" + MigrationStatusError MigrationStatus = "Error" + MigrationStatusRunning MigrationStatus = "Running" + MigrationStatusSelectLogins MigrationStatus = "SelectLogins" + MigrationStatusSourceAndTargetSelected MigrationStatus = "SourceAndTargetSelected" + MigrationStatusStopped MigrationStatus = "Stopped" +) + +func PossibleValuesForMigrationStatus() []string { + return []string{ + string(MigrationStatusCompleted), + string(MigrationStatusCompletedWithWarnings), + string(MigrationStatusConfigured), + string(MigrationStatusConnecting), + string(MigrationStatusDefault), + string(MigrationStatusError), + string(MigrationStatusRunning), + string(MigrationStatusSelectLogins), + string(MigrationStatusSourceAndTargetSelected), + string(MigrationStatusStopped), + } +} + +func (s *MigrationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMigrationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMigrationStatus(input string) (*MigrationStatus, error) { + vals := map[string]MigrationStatus{ + "completed": MigrationStatusCompleted, + "completedwithwarnings": MigrationStatusCompletedWithWarnings, + "configured": MigrationStatusConfigured, + "connecting": MigrationStatusConnecting, + "default": MigrationStatusDefault, + "error": MigrationStatusError, + "running": MigrationStatusRunning, + "selectlogins": MigrationStatusSelectLogins, + "sourceandtargetselected": MigrationStatusSourceAndTargetSelected, + "stopped": MigrationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MigrationStatus(input) + return &out, nil +} + +type 
MongoDbClusterType string + +const ( + MongoDbClusterTypeBlobContainer MongoDbClusterType = "BlobContainer" + MongoDbClusterTypeCosmosDb MongoDbClusterType = "CosmosDb" + MongoDbClusterTypeMongoDb MongoDbClusterType = "MongoDb" +) + +func PossibleValuesForMongoDbClusterType() []string { + return []string{ + string(MongoDbClusterTypeBlobContainer), + string(MongoDbClusterTypeCosmosDb), + string(MongoDbClusterTypeMongoDb), + } +} + +func (s *MongoDbClusterType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbClusterType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbClusterType(input string) (*MongoDbClusterType, error) { + vals := map[string]MongoDbClusterType{ + "blobcontainer": MongoDbClusterTypeBlobContainer, + "cosmosdb": MongoDbClusterTypeCosmosDb, + "mongodb": MongoDbClusterTypeMongoDb, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbClusterType(input) + return &out, nil +} + +type MongoDbErrorType string + +const ( + MongoDbErrorTypeError MongoDbErrorType = "Error" + MongoDbErrorTypeValidationError MongoDbErrorType = "ValidationError" + MongoDbErrorTypeWarning MongoDbErrorType = "Warning" +) + +func PossibleValuesForMongoDbErrorType() []string { + return []string{ + string(MongoDbErrorTypeError), + string(MongoDbErrorTypeValidationError), + string(MongoDbErrorTypeWarning), + } +} + +func (s *MongoDbErrorType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbErrorType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func 
parseMongoDbErrorType(input string) (*MongoDbErrorType, error) { + vals := map[string]MongoDbErrorType{ + "error": MongoDbErrorTypeError, + "validationerror": MongoDbErrorTypeValidationError, + "warning": MongoDbErrorTypeWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbErrorType(input) + return &out, nil +} + +type MongoDbMigrationState string + +const ( + MongoDbMigrationStateCanceled MongoDbMigrationState = "Canceled" + MongoDbMigrationStateComplete MongoDbMigrationState = "Complete" + MongoDbMigrationStateCopying MongoDbMigrationState = "Copying" + MongoDbMigrationStateFailed MongoDbMigrationState = "Failed" + MongoDbMigrationStateFinalizing MongoDbMigrationState = "Finalizing" + MongoDbMigrationStateInitialReplay MongoDbMigrationState = "InitialReplay" + MongoDbMigrationStateInitializing MongoDbMigrationState = "Initializing" + MongoDbMigrationStateNotStarted MongoDbMigrationState = "NotStarted" + MongoDbMigrationStateReplaying MongoDbMigrationState = "Replaying" + MongoDbMigrationStateRestarting MongoDbMigrationState = "Restarting" + MongoDbMigrationStateValidatingInput MongoDbMigrationState = "ValidatingInput" +) + +func PossibleValuesForMongoDbMigrationState() []string { + return []string{ + string(MongoDbMigrationStateCanceled), + string(MongoDbMigrationStateComplete), + string(MongoDbMigrationStateCopying), + string(MongoDbMigrationStateFailed), + string(MongoDbMigrationStateFinalizing), + string(MongoDbMigrationStateInitialReplay), + string(MongoDbMigrationStateInitializing), + string(MongoDbMigrationStateNotStarted), + string(MongoDbMigrationStateReplaying), + string(MongoDbMigrationStateRestarting), + string(MongoDbMigrationStateValidatingInput), + } +} + +func (s *MongoDbMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: 
%+v", err) + } + out, err := parseMongoDbMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbMigrationState(input string) (*MongoDbMigrationState, error) { + vals := map[string]MongoDbMigrationState{ + "canceled": MongoDbMigrationStateCanceled, + "complete": MongoDbMigrationStateComplete, + "copying": MongoDbMigrationStateCopying, + "failed": MongoDbMigrationStateFailed, + "finalizing": MongoDbMigrationStateFinalizing, + "initialreplay": MongoDbMigrationStateInitialReplay, + "initializing": MongoDbMigrationStateInitializing, + "notstarted": MongoDbMigrationStateNotStarted, + "replaying": MongoDbMigrationStateReplaying, + "restarting": MongoDbMigrationStateRestarting, + "validatinginput": MongoDbMigrationStateValidatingInput, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbMigrationState(input) + return &out, nil +} + +type MongoDbReplication string + +const ( + MongoDbReplicationContinuous MongoDbReplication = "Continuous" + MongoDbReplicationDisabled MongoDbReplication = "Disabled" + MongoDbReplicationOneTime MongoDbReplication = "OneTime" +) + +func PossibleValuesForMongoDbReplication() []string { + return []string{ + string(MongoDbReplicationContinuous), + string(MongoDbReplicationDisabled), + string(MongoDbReplicationOneTime), + } +} + +func (s *MongoDbReplication) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbReplication(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbReplication(input string) (*MongoDbReplication, error) { + vals := map[string]MongoDbReplication{ + "continuous": MongoDbReplicationContinuous, + "disabled": 
MongoDbReplicationDisabled, + "onetime": MongoDbReplicationOneTime, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbReplication(input) + return &out, nil +} + +type MongoDbShardKeyOrder string + +const ( + MongoDbShardKeyOrderForward MongoDbShardKeyOrder = "Forward" + MongoDbShardKeyOrderHashed MongoDbShardKeyOrder = "Hashed" + MongoDbShardKeyOrderReverse MongoDbShardKeyOrder = "Reverse" +) + +func PossibleValuesForMongoDbShardKeyOrder() []string { + return []string{ + string(MongoDbShardKeyOrderForward), + string(MongoDbShardKeyOrderHashed), + string(MongoDbShardKeyOrderReverse), + } +} + +func (s *MongoDbShardKeyOrder) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbShardKeyOrder(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbShardKeyOrder(input string) (*MongoDbShardKeyOrder, error) { + vals := map[string]MongoDbShardKeyOrder{ + "forward": MongoDbShardKeyOrderForward, + "hashed": MongoDbShardKeyOrderHashed, + "reverse": MongoDbShardKeyOrderReverse, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbShardKeyOrder(input) + return &out, nil +} + +type MySqlTargetPlatformType string + +const ( + MySqlTargetPlatformTypeAzureDbForMySQL MySqlTargetPlatformType = "AzureDbForMySQL" + MySqlTargetPlatformTypeSqlServer MySqlTargetPlatformType = "SqlServer" +) + +func PossibleValuesForMySqlTargetPlatformType() []string { + return []string{ + string(MySqlTargetPlatformTypeAzureDbForMySQL), + string(MySqlTargetPlatformTypeSqlServer), + } +} + +func (s *MySqlTargetPlatformType) UnmarshalJSON(bytes []byte) error { + var decoded string + if 
err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMySqlTargetPlatformType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMySqlTargetPlatformType(input string) (*MySqlTargetPlatformType, error) { + vals := map[string]MySqlTargetPlatformType{ + "azuredbformysql": MySqlTargetPlatformTypeAzureDbForMySQL, + "sqlserver": MySqlTargetPlatformTypeSqlServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MySqlTargetPlatformType(input) + return &out, nil +} + +type ObjectType string + +const ( + ObjectTypeFunction ObjectType = "Function" + ObjectTypeStoredProcedures ObjectType = "StoredProcedures" + ObjectTypeTable ObjectType = "Table" + ObjectTypeUser ObjectType = "User" + ObjectTypeView ObjectType = "View" +) + +func PossibleValuesForObjectType() []string { + return []string{ + string(ObjectTypeFunction), + string(ObjectTypeStoredProcedures), + string(ObjectTypeTable), + string(ObjectTypeUser), + string(ObjectTypeView), + } +} + +func (s *ObjectType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseObjectType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseObjectType(input string) (*ObjectType, error) { + vals := map[string]ObjectType{ + "function": ObjectTypeFunction, + "storedprocedures": ObjectTypeStoredProcedures, + "table": ObjectTypeTable, + "user": ObjectTypeUser, + "view": ObjectTypeView, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ObjectType(input) + return &out, nil +} + +type ReplicateMigrationState 
string + +const ( + ReplicateMigrationStateACTIONREQUIRED ReplicateMigrationState = "ACTION_REQUIRED" + ReplicateMigrationStateCOMPLETE ReplicateMigrationState = "COMPLETE" + ReplicateMigrationStateFAILED ReplicateMigrationState = "FAILED" + ReplicateMigrationStatePENDING ReplicateMigrationState = "PENDING" + ReplicateMigrationStateUNDEFINED ReplicateMigrationState = "UNDEFINED" + ReplicateMigrationStateVALIDATING ReplicateMigrationState = "VALIDATING" +) + +func PossibleValuesForReplicateMigrationState() []string { + return []string{ + string(ReplicateMigrationStateACTIONREQUIRED), + string(ReplicateMigrationStateCOMPLETE), + string(ReplicateMigrationStateFAILED), + string(ReplicateMigrationStatePENDING), + string(ReplicateMigrationStateUNDEFINED), + string(ReplicateMigrationStateVALIDATING), + } +} + +func (s *ReplicateMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseReplicateMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicateMigrationState(input string) (*ReplicateMigrationState, error) { + vals := map[string]ReplicateMigrationState{ + "action_required": ReplicateMigrationStateACTIONREQUIRED, + "complete": ReplicateMigrationStateCOMPLETE, + "failed": ReplicateMigrationStateFAILED, + "pending": ReplicateMigrationStatePENDING, + "undefined": ReplicateMigrationStateUNDEFINED, + "validating": ReplicateMigrationStateVALIDATING, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicateMigrationState(input) + return &out, nil +} + +type ResultType string + +const ( + ResultTypeCollection ResultType = "Collection" + ResultTypeDatabase ResultType = "Database" + ResultTypeMigration ResultType = "Migration" +) + +func 
PossibleValuesForResultType() []string { + return []string{ + string(ResultTypeCollection), + string(ResultTypeDatabase), + string(ResultTypeMigration), + } +} + +func (s *ResultType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResultType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResultType(input string) (*ResultType, error) { + vals := map[string]ResultType{ + "collection": ResultTypeCollection, + "database": ResultTypeDatabase, + "migration": ResultTypeMigration, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResultType(input) + return &out, nil +} + +type ScenarioSource string + +const ( + ScenarioSourceAccess ScenarioSource = "Access" + ScenarioSourceDBTwo ScenarioSource = "DB2" + ScenarioSourceMongoDB ScenarioSource = "MongoDB" + ScenarioSourceMySQL ScenarioSource = "MySQL" + ScenarioSourceMySQLRDS ScenarioSource = "MySQLRDS" + ScenarioSourceOracle ScenarioSource = "Oracle" + ScenarioSourcePostgreSQL ScenarioSource = "PostgreSQL" + ScenarioSourcePostgreSQLRDS ScenarioSource = "PostgreSQLRDS" + ScenarioSourceSQL ScenarioSource = "SQL" + ScenarioSourceSQLRDS ScenarioSource = "SQLRDS" + ScenarioSourceSybase ScenarioSource = "Sybase" +) + +func PossibleValuesForScenarioSource() []string { + return []string{ + string(ScenarioSourceAccess), + string(ScenarioSourceDBTwo), + string(ScenarioSourceMongoDB), + string(ScenarioSourceMySQL), + string(ScenarioSourceMySQLRDS), + string(ScenarioSourceOracle), + string(ScenarioSourcePostgreSQL), + string(ScenarioSourcePostgreSQLRDS), + string(ScenarioSourceSQL), + string(ScenarioSourceSQLRDS), + string(ScenarioSourceSybase), + } +} + +func (s *ScenarioSource) UnmarshalJSON(bytes []byte) error { + var 
decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioSource(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioSource(input string) (*ScenarioSource, error) { + vals := map[string]ScenarioSource{ + "access": ScenarioSourceAccess, + "db2": ScenarioSourceDBTwo, + "mongodb": ScenarioSourceMongoDB, + "mysql": ScenarioSourceMySQL, + "mysqlrds": ScenarioSourceMySQLRDS, + "oracle": ScenarioSourceOracle, + "postgresql": ScenarioSourcePostgreSQL, + "postgresqlrds": ScenarioSourcePostgreSQLRDS, + "sql": ScenarioSourceSQL, + "sqlrds": ScenarioSourceSQLRDS, + "sybase": ScenarioSourceSybase, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioSource(input) + return &out, nil +} + +type ScenarioTarget string + +const ( + ScenarioTargetAzureDBForMySql ScenarioTarget = "AzureDBForMySql" + ScenarioTargetAzureDBForPostgresSQL ScenarioTarget = "AzureDBForPostgresSQL" + ScenarioTargetMongoDB ScenarioTarget = "MongoDB" + ScenarioTargetSQLDB ScenarioTarget = "SQLDB" + ScenarioTargetSQLDW ScenarioTarget = "SQLDW" + ScenarioTargetSQLMI ScenarioTarget = "SQLMI" + ScenarioTargetSQLServer ScenarioTarget = "SQLServer" +) + +func PossibleValuesForScenarioTarget() []string { + return []string{ + string(ScenarioTargetAzureDBForMySql), + string(ScenarioTargetAzureDBForPostgresSQL), + string(ScenarioTargetMongoDB), + string(ScenarioTargetSQLDB), + string(ScenarioTargetSQLDW), + string(ScenarioTargetSQLMI), + string(ScenarioTargetSQLServer), + } +} + +func (s *ScenarioTarget) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioTarget(decoded) + if err != nil { + return 
fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioTarget(input string) (*ScenarioTarget, error) { + vals := map[string]ScenarioTarget{ + "azuredbformysql": ScenarioTargetAzureDBForMySql, + "azuredbforpostgressql": ScenarioTargetAzureDBForPostgresSQL, + "mongodb": ScenarioTargetMongoDB, + "sqldb": ScenarioTargetSQLDB, + "sqldw": ScenarioTargetSQLDW, + "sqlmi": ScenarioTargetSQLMI, + "sqlserver": ScenarioTargetSQLServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioTarget(input) + return &out, nil +} + +type ServerLevelPermissionsGroup string + +const ( + ServerLevelPermissionsGroupDefault ServerLevelPermissionsGroup = "Default" + ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL ServerLevelPermissionsGroup = "MigrationFromMySQLToAzureDBForMySQL" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureDB" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureMI" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureVM" +) + +func PossibleValuesForServerLevelPermissionsGroup() []string { + return []string{ + string(ServerLevelPermissionsGroupDefault), + string(ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM), + } +} + +func (s *ServerLevelPermissionsGroup) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServerLevelPermissionsGroup(decoded) + if err 
!= nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServerLevelPermissionsGroup(input string) (*ServerLevelPermissionsGroup, error) { + vals := map[string]ServerLevelPermissionsGroup{ + "default": ServerLevelPermissionsGroupDefault, + "migrationfrommysqltoazuredbformysql": ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL, + "migrationfromsqlservertoazuredb": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB, + "migrationfromsqlservertoazuremi": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI, + "migrationfromsqlservertoazurevm": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServerLevelPermissionsGroup(input) + return &out, nil +} + +type ServiceProvisioningState string + +const ( + ServiceProvisioningStateAccepted ServiceProvisioningState = "Accepted" + ServiceProvisioningStateDeleting ServiceProvisioningState = "Deleting" + ServiceProvisioningStateDeploying ServiceProvisioningState = "Deploying" + ServiceProvisioningStateFailed ServiceProvisioningState = "Failed" + ServiceProvisioningStateFailedToStart ServiceProvisioningState = "FailedToStart" + ServiceProvisioningStateFailedToStop ServiceProvisioningState = "FailedToStop" + ServiceProvisioningStateStarting ServiceProvisioningState = "Starting" + ServiceProvisioningStateStopped ServiceProvisioningState = "Stopped" + ServiceProvisioningStateStopping ServiceProvisioningState = "Stopping" + ServiceProvisioningStateSucceeded ServiceProvisioningState = "Succeeded" +) + +func PossibleValuesForServiceProvisioningState() []string { + return []string{ + string(ServiceProvisioningStateAccepted), + string(ServiceProvisioningStateDeleting), + string(ServiceProvisioningStateDeploying), + string(ServiceProvisioningStateFailed), + string(ServiceProvisioningStateFailedToStart), + 
string(ServiceProvisioningStateFailedToStop), + string(ServiceProvisioningStateStarting), + string(ServiceProvisioningStateStopped), + string(ServiceProvisioningStateStopping), + string(ServiceProvisioningStateSucceeded), + } +} + +func (s *ServiceProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServiceProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServiceProvisioningState(input string) (*ServiceProvisioningState, error) { + vals := map[string]ServiceProvisioningState{ + "accepted": ServiceProvisioningStateAccepted, + "deleting": ServiceProvisioningStateDeleting, + "deploying": ServiceProvisioningStateDeploying, + "failed": ServiceProvisioningStateFailed, + "failedtostart": ServiceProvisioningStateFailedToStart, + "failedtostop": ServiceProvisioningStateFailedToStop, + "starting": ServiceProvisioningStateStarting, + "stopped": ServiceProvisioningStateStopped, + "stopping": ServiceProvisioningStateStopping, + "succeeded": ServiceProvisioningStateSucceeded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServiceProvisioningState(input) + return &out, nil +} + +type ServiceScalability string + +const ( + ServiceScalabilityAutomatic ServiceScalability = "automatic" + ServiceScalabilityManual ServiceScalability = "manual" + ServiceScalabilityNone ServiceScalability = "none" +) + +func PossibleValuesForServiceScalability() []string { + return []string{ + string(ServiceScalabilityAutomatic), + string(ServiceScalabilityManual), + string(ServiceScalabilityNone), + } +} + +func (s *ServiceScalability) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return 
fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServiceScalability(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServiceScalability(input string) (*ServiceScalability, error) { + vals := map[string]ServiceScalability{ + "automatic": ServiceScalabilityAutomatic, + "manual": ServiceScalabilityManual, + "none": ServiceScalabilityNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServiceScalability(input) + return &out, nil +} + +type Severity string + +const ( + SeverityError Severity = "Error" + SeverityMessage Severity = "Message" + SeverityWarning Severity = "Warning" +) + +func PossibleValuesForSeverity() []string { + return []string{ + string(SeverityError), + string(SeverityMessage), + string(SeverityWarning), + } +} + +func (s *Severity) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSeverity(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSeverity(input string) (*Severity, error) { + vals := map[string]Severity{ + "error": SeverityError, + "message": SeverityMessage, + "warning": SeverityWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Severity(input) + return &out, nil +} + +type SqlSourcePlatform string + +const ( + SqlSourcePlatformSqlOnPrem SqlSourcePlatform = "SqlOnPrem" +) + +func PossibleValuesForSqlSourcePlatform() []string { + return []string{ + string(SqlSourcePlatformSqlOnPrem), + } +} + +func (s *SqlSourcePlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return 
fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSqlSourcePlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSqlSourcePlatform(input string) (*SqlSourcePlatform, error) { + vals := map[string]SqlSourcePlatform{ + "sqlonprem": SqlSourcePlatformSqlOnPrem, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SqlSourcePlatform(input) + return &out, nil +} + +type SsisMigrationOverwriteOption string + +const ( + SsisMigrationOverwriteOptionIgnore SsisMigrationOverwriteOption = "Ignore" + SsisMigrationOverwriteOptionOverwrite SsisMigrationOverwriteOption = "Overwrite" +) + +func PossibleValuesForSsisMigrationOverwriteOption() []string { + return []string{ + string(SsisMigrationOverwriteOptionIgnore), + string(SsisMigrationOverwriteOptionOverwrite), + } +} + +func (s *SsisMigrationOverwriteOption) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisMigrationOverwriteOption(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisMigrationOverwriteOption(input string) (*SsisMigrationOverwriteOption, error) { + vals := map[string]SsisMigrationOverwriteOption{ + "ignore": SsisMigrationOverwriteOptionIgnore, + "overwrite": SsisMigrationOverwriteOptionOverwrite, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisMigrationOverwriteOption(input) + return &out, nil +} + +type SsisMigrationStage string + +const ( + SsisMigrationStageCompleted SsisMigrationStage = "Completed" + SsisMigrationStageInProgress SsisMigrationStage = "InProgress" + SsisMigrationStageInitialize 
SsisMigrationStage = "Initialize" + SsisMigrationStageNone SsisMigrationStage = "None" +) + +func PossibleValuesForSsisMigrationStage() []string { + return []string{ + string(SsisMigrationStageCompleted), + string(SsisMigrationStageInProgress), + string(SsisMigrationStageInitialize), + string(SsisMigrationStageNone), + } +} + +func (s *SsisMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisMigrationStage(input string) (*SsisMigrationStage, error) { + vals := map[string]SsisMigrationStage{ + "completed": SsisMigrationStageCompleted, + "inprogress": SsisMigrationStageInProgress, + "initialize": SsisMigrationStageInitialize, + "none": SsisMigrationStageNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisMigrationStage(input) + return &out, nil +} + +type SsisStoreType string + +const ( + SsisStoreTypeSsisCatalog SsisStoreType = "SsisCatalog" +) + +func PossibleValuesForSsisStoreType() []string { + return []string{ + string(SsisStoreTypeSsisCatalog), + } +} + +func (s *SsisStoreType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisStoreType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisStoreType(input string) (*SsisStoreType, error) { + vals := map[string]SsisStoreType{ + "ssiscatalog": SsisStoreTypeSsisCatalog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it 
+ out := SsisStoreType(input) + return &out, nil +} + +type SyncDatabaseMigrationReportingState string + +const ( + SyncDatabaseMigrationReportingStateBACKUPCOMPLETED SyncDatabaseMigrationReportingState = "BACKUP_COMPLETED" + SyncDatabaseMigrationReportingStateBACKUPINPROGRESS SyncDatabaseMigrationReportingState = "BACKUP_IN_PROGRESS" + SyncDatabaseMigrationReportingStateCANCELLED SyncDatabaseMigrationReportingState = "CANCELLED" + SyncDatabaseMigrationReportingStateCANCELLING SyncDatabaseMigrationReportingState = "CANCELLING" + SyncDatabaseMigrationReportingStateCOMPLETE SyncDatabaseMigrationReportingState = "COMPLETE" + SyncDatabaseMigrationReportingStateCOMPLETING SyncDatabaseMigrationReportingState = "COMPLETING" + SyncDatabaseMigrationReportingStateCONFIGURING SyncDatabaseMigrationReportingState = "CONFIGURING" + SyncDatabaseMigrationReportingStateFAILED SyncDatabaseMigrationReportingState = "FAILED" + SyncDatabaseMigrationReportingStateINITIALIAZING SyncDatabaseMigrationReportingState = "INITIALIAZING" + SyncDatabaseMigrationReportingStateREADYTOCOMPLETE SyncDatabaseMigrationReportingState = "READY_TO_COMPLETE" + SyncDatabaseMigrationReportingStateRESTORECOMPLETED SyncDatabaseMigrationReportingState = "RESTORE_COMPLETED" + SyncDatabaseMigrationReportingStateRESTOREINPROGRESS SyncDatabaseMigrationReportingState = "RESTORE_IN_PROGRESS" + SyncDatabaseMigrationReportingStateRUNNING SyncDatabaseMigrationReportingState = "RUNNING" + SyncDatabaseMigrationReportingStateSTARTING SyncDatabaseMigrationReportingState = "STARTING" + SyncDatabaseMigrationReportingStateUNDEFINED SyncDatabaseMigrationReportingState = "UNDEFINED" + SyncDatabaseMigrationReportingStateVALIDATING SyncDatabaseMigrationReportingState = "VALIDATING" + SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE SyncDatabaseMigrationReportingState = "VALIDATION_COMPLETE" + SyncDatabaseMigrationReportingStateVALIDATIONFAILED SyncDatabaseMigrationReportingState = "VALIDATION_FAILED" +) + +func 
PossibleValuesForSyncDatabaseMigrationReportingState() []string { + return []string{ + string(SyncDatabaseMigrationReportingStateBACKUPCOMPLETED), + string(SyncDatabaseMigrationReportingStateBACKUPINPROGRESS), + string(SyncDatabaseMigrationReportingStateCANCELLED), + string(SyncDatabaseMigrationReportingStateCANCELLING), + string(SyncDatabaseMigrationReportingStateCOMPLETE), + string(SyncDatabaseMigrationReportingStateCOMPLETING), + string(SyncDatabaseMigrationReportingStateCONFIGURING), + string(SyncDatabaseMigrationReportingStateFAILED), + string(SyncDatabaseMigrationReportingStateINITIALIAZING), + string(SyncDatabaseMigrationReportingStateREADYTOCOMPLETE), + string(SyncDatabaseMigrationReportingStateRESTORECOMPLETED), + string(SyncDatabaseMigrationReportingStateRESTOREINPROGRESS), + string(SyncDatabaseMigrationReportingStateRUNNING), + string(SyncDatabaseMigrationReportingStateSTARTING), + string(SyncDatabaseMigrationReportingStateUNDEFINED), + string(SyncDatabaseMigrationReportingStateVALIDATING), + string(SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE), + string(SyncDatabaseMigrationReportingStateVALIDATIONFAILED), + } +} + +func (s *SyncDatabaseMigrationReportingState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSyncDatabaseMigrationReportingState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSyncDatabaseMigrationReportingState(input string) (*SyncDatabaseMigrationReportingState, error) { + vals := map[string]SyncDatabaseMigrationReportingState{ + "backup_completed": SyncDatabaseMigrationReportingStateBACKUPCOMPLETED, + "backup_in_progress": SyncDatabaseMigrationReportingStateBACKUPINPROGRESS, + "cancelled": SyncDatabaseMigrationReportingStateCANCELLED, + "cancelling": SyncDatabaseMigrationReportingStateCANCELLING, + "complete": 
SyncDatabaseMigrationReportingStateCOMPLETE, + "completing": SyncDatabaseMigrationReportingStateCOMPLETING, + "configuring": SyncDatabaseMigrationReportingStateCONFIGURING, + "failed": SyncDatabaseMigrationReportingStateFAILED, + "initialiazing": SyncDatabaseMigrationReportingStateINITIALIAZING, + "ready_to_complete": SyncDatabaseMigrationReportingStateREADYTOCOMPLETE, + "restore_completed": SyncDatabaseMigrationReportingStateRESTORECOMPLETED, + "restore_in_progress": SyncDatabaseMigrationReportingStateRESTOREINPROGRESS, + "running": SyncDatabaseMigrationReportingStateRUNNING, + "starting": SyncDatabaseMigrationReportingStateSTARTING, + "undefined": SyncDatabaseMigrationReportingStateUNDEFINED, + "validating": SyncDatabaseMigrationReportingStateVALIDATING, + "validation_complete": SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE, + "validation_failed": SyncDatabaseMigrationReportingStateVALIDATIONFAILED, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SyncDatabaseMigrationReportingState(input) + return &out, nil +} + +type SyncTableMigrationState string + +const ( + SyncTableMigrationStateBEFORELOAD SyncTableMigrationState = "BEFORE_LOAD" + SyncTableMigrationStateCANCELED SyncTableMigrationState = "CANCELED" + SyncTableMigrationStateCOMPLETED SyncTableMigrationState = "COMPLETED" + SyncTableMigrationStateERROR SyncTableMigrationState = "ERROR" + SyncTableMigrationStateFAILED SyncTableMigrationState = "FAILED" + SyncTableMigrationStateFULLLOAD SyncTableMigrationState = "FULL_LOAD" +) + +func PossibleValuesForSyncTableMigrationState() []string { + return []string{ + string(SyncTableMigrationStateBEFORELOAD), + string(SyncTableMigrationStateCANCELED), + string(SyncTableMigrationStateCOMPLETED), + string(SyncTableMigrationStateERROR), + string(SyncTableMigrationStateFAILED), + string(SyncTableMigrationStateFULLLOAD), + } +} + +func (s *SyncTableMigrationState) 
UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSyncTableMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSyncTableMigrationState(input string) (*SyncTableMigrationState, error) { + vals := map[string]SyncTableMigrationState{ + "before_load": SyncTableMigrationStateBEFORELOAD, + "canceled": SyncTableMigrationStateCANCELED, + "completed": SyncTableMigrationStateCOMPLETED, + "error": SyncTableMigrationStateERROR, + "failed": SyncTableMigrationStateFAILED, + "full_load": SyncTableMigrationStateFULLLOAD, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SyncTableMigrationState(input) + return &out, nil +} + +type TaskState string + +const ( + TaskStateCanceled TaskState = "Canceled" + TaskStateFailed TaskState = "Failed" + TaskStateFailedInputValidation TaskState = "FailedInputValidation" + TaskStateFaulted TaskState = "Faulted" + TaskStateQueued TaskState = "Queued" + TaskStateRunning TaskState = "Running" + TaskStateSucceeded TaskState = "Succeeded" + TaskStateUnknown TaskState = "Unknown" +) + +func PossibleValuesForTaskState() []string { + return []string{ + string(TaskStateCanceled), + string(TaskStateFailed), + string(TaskStateFailedInputValidation), + string(TaskStateFaulted), + string(TaskStateQueued), + string(TaskStateRunning), + string(TaskStateSucceeded), + string(TaskStateUnknown), + } +} + +func (s *TaskState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func 
parseTaskState(input string) (*TaskState, error) { + vals := map[string]TaskState{ + "canceled": TaskStateCanceled, + "failed": TaskStateFailed, + "failedinputvalidation": TaskStateFailedInputValidation, + "faulted": TaskStateFaulted, + "queued": TaskStateQueued, + "running": TaskStateRunning, + "succeeded": TaskStateSucceeded, + "unknown": TaskStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskState(input) + return &out, nil +} + +type TaskType string + +const ( + TaskTypeConnectPointMongoDb TaskType = "Connect.MongoDb" + TaskTypeConnectToSourcePointMySql TaskType = "ConnectToSource.MySql" + TaskTypeConnectToSourcePointOraclePointSync TaskType = "ConnectToSource.Oracle.Sync" + TaskTypeConnectToSourcePointPostgreSqlPointSync TaskType = "ConnectToSource.PostgreSql.Sync" + TaskTypeConnectToSourcePointSqlServer TaskType = "ConnectToSource.SqlServer" + TaskTypeConnectToSourcePointSqlServerPointSync TaskType = "ConnectToSource.SqlServer.Sync" + TaskTypeConnectToTargetPointAzureDbForMySql TaskType = "ConnectToTarget.AzureDbForMySql" + TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointAzureSqlDbMI TaskType = "ConnectToTarget.AzureSqlDbMI" + TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS TaskType = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointSqlDb TaskType = "ConnectToTarget.SqlDb" + TaskTypeConnectToTargetPointSqlDbPointSync TaskType = "ConnectToTarget.SqlDb.Sync" + TaskTypeGetTDECertificatesPointSql TaskType = "GetTDECertificates.Sql" + TaskTypeGetUserTablesMySql TaskType = "GetUserTablesMySql" + TaskTypeGetUserTablesOracle TaskType = "GetUserTablesOracle" + 
TaskTypeGetUserTablesPointAzureSqlDbPointSync TaskType = "GetUserTables.AzureSqlDb.Sync" + TaskTypeGetUserTablesPointSql TaskType = "GetUserTables.Sql" + TaskTypeGetUserTablesPostgreSql TaskType = "GetUserTablesPostgreSql" + TaskTypeMigratePointMongoDb TaskType = "Migrate.MongoDb" + TaskTypeMigratePointMySqlPointAzureDbForMySql TaskType = "Migrate.MySql.AzureDbForMySql" + TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync TaskType = "Migrate.MySql.AzureDbForMySql.Sync" + TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync TaskType = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo TaskType = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + TaskTypeMigratePointSqlServerPointAzureSqlDbMI TaskType = "Migrate.SqlServer.AzureSqlDbMI" + TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync TaskType = "Migrate.SqlServer.AzureSqlDb.Sync" + TaskTypeMigratePointSqlServerPointSqlDb TaskType = "Migrate.SqlServer.SqlDb" + TaskTypeMigratePointSsis TaskType = "Migrate.Ssis" + TaskTypeMigrateSchemaSqlServerSqlDb TaskType = "MigrateSchemaSqlServerSqlDb" + TaskTypeServicePointCheckPointOCI TaskType = "Service.Check.OCI" + TaskTypeServicePointInstallPointOCI TaskType = "Service.Install.OCI" + TaskTypeServicePointUploadPointOCI TaskType = "Service.Upload.OCI" + TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync TaskType = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + TaskTypeValidatePointMongoDb TaskType = "Validate.MongoDb" + TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync TaskType = 
"Validate.Oracle.AzureDbPostgreSql.Sync" +) + +func PossibleValuesForTaskType() []string { + return []string{ + string(TaskTypeConnectPointMongoDb), + string(TaskTypeConnectToSourcePointMySql), + string(TaskTypeConnectToSourcePointOraclePointSync), + string(TaskTypeConnectToSourcePointPostgreSqlPointSync), + string(TaskTypeConnectToSourcePointSqlServer), + string(TaskTypeConnectToSourcePointSqlServerPointSync), + string(TaskTypeConnectToTargetPointAzureDbForMySql), + string(TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointAzureSqlDbMI), + string(TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointSqlDb), + string(TaskTypeConnectToTargetPointSqlDbPointSync), + string(TaskTypeGetTDECertificatesPointSql), + string(TaskTypeGetUserTablesMySql), + string(TaskTypeGetUserTablesOracle), + string(TaskTypeGetUserTablesPointAzureSqlDbPointSync), + string(TaskTypeGetUserTablesPointSql), + string(TaskTypeGetUserTablesPostgreSql), + string(TaskTypeMigratePointMongoDb), + string(TaskTypeMigratePointMySqlPointAzureDbForMySql), + string(TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync), + string(TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMI), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync), + string(TaskTypeMigratePointSqlServerPointSqlDb), + string(TaskTypeMigratePointSsis), + string(TaskTypeMigrateSchemaSqlServerSqlDb), + string(TaskTypeServicePointCheckPointOCI), + string(TaskTypeServicePointInstallPointOCI), + string(TaskTypeServicePointUploadPointOCI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI), + 
string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync), + string(TaskTypeValidatePointMongoDb), + string(TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync), + } +} + +func (s *TaskType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTaskType(input string) (*TaskType, error) { + vals := map[string]TaskType{ + "connect.mongodb": TaskTypeConnectPointMongoDb, + "connecttosource.mysql": TaskTypeConnectToSourcePointMySql, + "connecttosource.oracle.sync": TaskTypeConnectToSourcePointOraclePointSync, + "connecttosource.postgresql.sync": TaskTypeConnectToSourcePointPostgreSqlPointSync, + "connecttosource.sqlserver": TaskTypeConnectToSourcePointSqlServer, + "connecttosource.sqlserver.sync": TaskTypeConnectToSourcePointSqlServerPointSync, + "connecttotarget.azuredbformysql": TaskTypeConnectToTargetPointAzureDbForMySql, + "connecttotarget.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync, + "connecttotarget.azuresqldbmi": TaskTypeConnectToTargetPointAzureSqlDbMI, + "connecttotarget.azuresqldbmi.sync.lrs": TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS, + "connecttotarget.oracle.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync, + "connecttotarget.sqldb": TaskTypeConnectToTargetPointSqlDb, + "connecttotarget.sqldb.sync": TaskTypeConnectToTargetPointSqlDbPointSync, + "gettdecertificates.sql": TaskTypeGetTDECertificatesPointSql, + "getusertablesmysql": TaskTypeGetUserTablesMySql, + "getusertablesoracle": TaskTypeGetUserTablesOracle, + "getusertables.azuresqldb.sync": 
TaskTypeGetUserTablesPointAzureSqlDbPointSync, + "getusertables.sql": TaskTypeGetUserTablesPointSql, + "getusertablespostgresql": TaskTypeGetUserTablesPostgreSql, + "migrate.mongodb": TaskTypeMigratePointMongoDb, + "migrate.mysql.azuredbformysql": TaskTypeMigratePointMySqlPointAzureDbForMySql, + "migrate.mysql.azuredbformysql.sync": TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync, + "migrate.oracle.azuredbforpostgresql.sync": TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync, + "migrate.postgresql.azuredbforpostgresql.syncv2": TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo, + "migrate.sqlserver.azuresqldbmi": TaskTypeMigratePointSqlServerPointAzureSqlDbMI, + "migrate.sqlserver.azuresqldbmi.sync.lrs": TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "migrate.sqlserver.azuresqldb.sync": TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync, + "migrate.sqlserver.sqldb": TaskTypeMigratePointSqlServerPointSqlDb, + "migrate.ssis": TaskTypeMigratePointSsis, + "migrateschemasqlserversqldb": TaskTypeMigrateSchemaSqlServerSqlDb, + "service.check.oci": TaskTypeServicePointCheckPointOCI, + "service.install.oci": TaskTypeServicePointInstallPointOCI, + "service.upload.oci": TaskTypeServicePointUploadPointOCI, + "validatemigrationinput.sqlserver.azuresqldbmi": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI, + "validatemigrationinput.sqlserver.azuresqldbmi.sync.lrs": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "validatemigrationinput.sqlserver.sqldb.sync": TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync, + "validate.mongodb": TaskTypeValidatePointMongoDb, + "validate.oracle.azuredbpostgresql.sync": TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskType(input) + return &out, nil +} + +type 
UpdateActionType string + +const ( + UpdateActionTypeAddedOnTarget UpdateActionType = "AddedOnTarget" + UpdateActionTypeChangedOnTarget UpdateActionType = "ChangedOnTarget" + UpdateActionTypeDeletedOnTarget UpdateActionType = "DeletedOnTarget" +) + +func PossibleValuesForUpdateActionType() []string { + return []string{ + string(UpdateActionTypeAddedOnTarget), + string(UpdateActionTypeChangedOnTarget), + string(UpdateActionTypeDeletedOnTarget), + } +} + +func (s *UpdateActionType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseUpdateActionType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseUpdateActionType(input string) (*UpdateActionType, error) { + vals := map[string]UpdateActionType{ + "addedontarget": UpdateActionTypeAddedOnTarget, + "changedontarget": UpdateActionTypeChangedOnTarget, + "deletedontarget": UpdateActionTypeDeletedOnTarget, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UpdateActionType(input) + return &out, nil +} + +type ValidationStatus string + +const ( + ValidationStatusCompleted ValidationStatus = "Completed" + ValidationStatusCompletedWithIssues ValidationStatus = "CompletedWithIssues" + ValidationStatusDefault ValidationStatus = "Default" + ValidationStatusFailed ValidationStatus = "Failed" + ValidationStatusInProgress ValidationStatus = "InProgress" + ValidationStatusInitialized ValidationStatus = "Initialized" + ValidationStatusNotStarted ValidationStatus = "NotStarted" + ValidationStatusStopped ValidationStatus = "Stopped" +) + +func PossibleValuesForValidationStatus() []string { + return []string{ + string(ValidationStatusCompleted), + string(ValidationStatusCompletedWithIssues), + string(ValidationStatusDefault), + 
string(ValidationStatusFailed), + string(ValidationStatusInProgress), + string(ValidationStatusInitialized), + string(ValidationStatusNotStarted), + string(ValidationStatusStopped), + } +} + +func (s *ValidationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseValidationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseValidationStatus(input string) (*ValidationStatus, error) { + vals := map[string]ValidationStatus{ + "completed": ValidationStatusCompleted, + "completedwithissues": ValidationStatusCompletedWithIssues, + "default": ValidationStatusDefault, + "failed": ValidationStatusFailed, + "inprogress": ValidationStatusInProgress, + "initialized": ValidationStatusInitialized, + "notstarted": ValidationStatusNotStarted, + "stopped": ValidationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ValidationStatus(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/id_project.go b/resource-manager/datamigration/2025-06-30/serviceresource/id_project.go new file mode 100644 index 00000000000..e4d20637d8d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/id_project.go @@ -0,0 +1,139 @@ +package serviceresource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ProjectId{}) +} + +var _ resourceids.ResourceId = &ProjectId{} + +// ProjectId is a struct representing the Resource ID for a Project +type ProjectId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string +} + +// NewProjectID returns a new ProjectId struct +func NewProjectID(subscriptionId string, resourceGroupName string, serviceName string, projectName string) ProjectId { + return ProjectId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + } +} + +// ParseProjectID parses 'input' into a ProjectId +func ParseProjectID(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseProjectIDInsensitively parses 'input' case-insensitively into a ProjectId +// note: this method should only be used for API response data and not user input +func ParseProjectIDInsensitively(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ProjectId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + 
if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + return nil +} + +// ValidateProjectID checks that 'input' can be parsed as a Project ID +func ValidateProjectID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseProjectID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Project ID +func (id ProjectId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Project ID +func (id ProjectId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + } +} + +// String 
returns a human-readable description of this Project ID +func (id ProjectId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + } + return fmt.Sprintf("Project (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/id_project_test.go b/resource-manager/datamigration/2025-06-30/serviceresource/id_project_test.go new file mode 100644 index 00000000000..fe7c85f1c55 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/id_project_test.go @@ -0,0 +1,327 @@ +package serviceresource + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &ProjectId{} + +func TestNewProjectID(t *testing.T) { + id := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } +} + +func TestFormatProjectID(t *testing.T) { + actual := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseProjectID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if 
actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestParseProjectIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: 
&ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != 
v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestSegmentsForProjectId(t *testing.T) { + segments := ProjectId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ProjectId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/id_service.go b/resource-manager/datamigration/2025-06-30/serviceresource/id_service.go new file mode 100644 index 00000000000..fbd1e64a55c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/id_service.go @@ -0,0 +1,130 @@ +package serviceresource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ServiceId{}) +} + +var _ resourceids.ResourceId = &ServiceId{} + +// ServiceId is a struct representing the Resource ID for a Service +type ServiceId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string +} + +// NewServiceID returns a new ServiceId struct +func NewServiceID(subscriptionId string, resourceGroupName string, serviceName string) ServiceId { + return ServiceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + } +} + +// ParseServiceID parses 'input' into a ServiceId +func ParseServiceID(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceIDInsensitively parses 'input' case-insensitively into a ServiceId +// note: this method should only be used for API response data and not user input +func ParseServiceIDInsensitively(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + return nil +} + +// ValidateServiceID checks that 'input' can be parsed as a Service ID +func ValidateServiceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service ID +func (id ServiceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service ID +func (id ServiceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + } +} + +// String returns a human-readable description of this Service ID +func (id ServiceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + } + return fmt.Sprintf("Service (%s)", strings.Join(components, "\n")) +} diff --git 
a/resource-manager/datamigration/2025-06-30/serviceresource/id_service_test.go b/resource-manager/datamigration/2025-06-30/serviceresource/id_service_test.go new file mode 100644 index 00000000000..e603b45396a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/id_service_test.go @@ -0,0 +1,282 @@ +package serviceresource + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &ServiceId{} + +func TestNewServiceID(t *testing.T) { + id := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } +} + +func TestFormatServiceID(t *testing.T) { + actual := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseServiceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for 
ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestParseServiceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { 
+ // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + 
t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestSegmentsForServiceId(t *testing.T) { + segments := ServiceId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/id_subscriptionresourcegroup.go b/resource-manager/datamigration/2025-06-30/serviceresource/id_subscriptionresourcegroup.go new file mode 100644 index 00000000000..75f44d843bd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/id_subscriptionresourcegroup.go @@ -0,0 +1,119 @@ +package serviceresource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&SubscriptionResourceGroupId{}) +} + +var _ resourceids.ResourceId = &SubscriptionResourceGroupId{} + +// SubscriptionResourceGroupId is a struct representing the Resource ID for a Subscription Resource Group +type SubscriptionResourceGroupId struct { + SubscriptionId string + ResourceGroupName string +} + +// NewSubscriptionResourceGroupID returns a new SubscriptionResourceGroupId struct +func NewSubscriptionResourceGroupID(subscriptionId string, resourceGroupName string) SubscriptionResourceGroupId { + return SubscriptionResourceGroupId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + } +} + +// ParseSubscriptionResourceGroupID parses 'input' into a SubscriptionResourceGroupId +func ParseSubscriptionResourceGroupID(input string) (*SubscriptionResourceGroupId, error) { + parser := resourceids.NewParserFromResourceIdType(&SubscriptionResourceGroupId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SubscriptionResourceGroupId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseSubscriptionResourceGroupIDInsensitively parses 'input' case-insensitively into a SubscriptionResourceGroupId +// note: this method should only be used for API response data and not user input +func ParseSubscriptionResourceGroupIDInsensitively(input string) (*SubscriptionResourceGroupId, error) { + parser := resourceids.NewParserFromResourceIdType(&SubscriptionResourceGroupId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SubscriptionResourceGroupId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *SubscriptionResourceGroupId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = 
input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + return nil +} + +// ValidateSubscriptionResourceGroupID checks that 'input' can be parsed as a Subscription Resource Group ID +func ValidateSubscriptionResourceGroupID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseSubscriptionResourceGroupID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Subscription Resource Group ID +func (id SubscriptionResourceGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Subscription Resource Group ID +func (id SubscriptionResourceGroupId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + } +} + +// String returns a human-readable description of this Subscription Resource Group ID +func (id SubscriptionResourceGroupId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + } + return fmt.Sprintf("Subscription Resource Group (%s)", strings.Join(components, "\n")) +} diff --git 
a/resource-manager/datamigration/2025-06-30/serviceresource/id_subscriptionresourcegroup_test.go b/resource-manager/datamigration/2025-06-30/serviceresource/id_subscriptionresourcegroup_test.go new file mode 100644 index 00000000000..16a1e877f58 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/id_subscriptionresourcegroup_test.go @@ -0,0 +1,207 @@ +package serviceresource + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &SubscriptionResourceGroupId{} + +func TestNewSubscriptionResourceGroupID(t *testing.T) { + id := NewSubscriptionResourceGroupID("12345678-1234-9876-4563-123456789012", "resourceGroupName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } +} + +func TestFormatSubscriptionResourceGroupID(t *testing.T) { + actual := NewSubscriptionResourceGroupID("12345678-1234-9876-4563-123456789012", "resourceGroupName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseSubscriptionResourceGroupID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SubscriptionResourceGroupId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Expected: &SubscriptionResourceGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSubscriptionResourceGroupID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + } +} + +func TestParseSubscriptionResourceGroupIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SubscriptionResourceGroupId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Expected: &SubscriptionResourceGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Expected: &SubscriptionResourceGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSubscriptionResourceGroupIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + } +} + +func TestSegmentsForSubscriptionResourceGroupId(t *testing.T) { + segments := 
SubscriptionResourceGroupId{}.Segments() + if len(segments) == 0 { + t.Fatalf("SubscriptionResourceGroupId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/method_servicescheckstatus.go b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicescheckstatus.go new file mode 100644 index 00000000000..33f7180c4ad --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicescheckstatus.go @@ -0,0 +1,54 @@ +package serviceresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesCheckStatusOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DataMigrationServiceStatusResponse +} + +// ServicesCheckStatus ... 
+func (c ServiceResourceClient) ServicesCheckStatus(ctx context.Context, id ServiceId) (result ServicesCheckStatusOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/checkStatus", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DataMigrationServiceStatusResponse + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/method_servicescreateorupdate.go b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicescreateorupdate.go new file mode 100644 index 00000000000..69f62aabe30 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicescreateorupdate.go @@ -0,0 +1,76 @@ +package serviceresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesCreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DataMigrationService +} + +// ServicesCreateOrUpdate ... 
+func (c ServiceResourceClient) ServicesCreateOrUpdate(ctx context.Context, id ServiceId, input DataMigrationService) (result ServicesCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesCreateOrUpdateThenPoll performs ServicesCreateOrUpdate then polls until it's completed +func (c ServiceResourceClient) ServicesCreateOrUpdateThenPoll(ctx context.Context, id ServiceId, input DataMigrationService) error { + result, err := c.ServicesCreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing ServicesCreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesCreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesdelete.go b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesdelete.go new file mode 100644 index 00000000000..32630ee1f93 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesdelete.go @@ -0,0 +1,100 @@ +package serviceresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesDeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +type ServicesDeleteOperationOptions struct { + DeleteRunningTasks *bool +} + +func DefaultServicesDeleteOperationOptions() ServicesDeleteOperationOptions { + return ServicesDeleteOperationOptions{} +} + +func (o ServicesDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ServicesDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ServicesDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.DeleteRunningTasks != nil { + out.Append("deleteRunningTasks", fmt.Sprintf("%v", *o.DeleteRunningTasks)) + } + return &out +} + +// ServicesDelete ... 
+func (c ServiceResourceClient) ServicesDelete(ctx context.Context, id ServiceId, options ServicesDeleteOperationOptions) (result ServicesDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesDeleteThenPoll performs ServicesDelete then polls until it's completed +func (c ServiceResourceClient) ServicesDeleteThenPoll(ctx context.Context, id ServiceId, options ServicesDeleteOperationOptions) error { + result, err := c.ServicesDelete(ctx, id, options) + if err != nil { + return fmt.Errorf("performing ServicesDelete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesDelete: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesget.go b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesget.go new file mode 100644 index 00000000000..0e7a2126e5e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesget.go @@ -0,0 +1,53 @@ +package serviceresource + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServicesGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DataMigrationService +} + +// ServicesGet ... +func (c ServiceResourceClient) ServicesGet(ctx context.Context, id ServiceId) (result ServicesGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DataMigrationService + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/method_serviceslist.go b/resource-manager/datamigration/2025-06-30/serviceresource/method_serviceslist.go new file mode 100644 index 00000000000..5950ce5f2b0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/method_serviceslist.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServicesListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]DataMigrationService +} + +type ServicesListCompleteResult struct { + LatestHttpResponse *http.Response + Items []DataMigrationService +} + +type ServicesListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ServicesListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ServicesList ... +func (c ServiceResourceClient) ServicesList(ctx context.Context, id commonids.SubscriptionId) (result ServicesListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ServicesListCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/services", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]DataMigrationService `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ServicesListComplete retrieves all the results into a single object +func (c ServiceResourceClient) ServicesListComplete(ctx context.Context, id commonids.SubscriptionId) (ServicesListCompleteResult, error) { + return c.ServicesListCompleteMatchingPredicate(ctx, id, DataMigrationServiceOperationPredicate{}) +} + +// ServicesListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c ServiceResourceClient) ServicesListCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate DataMigrationServiceOperationPredicate) (result ServicesListCompleteResult, err 
error) { + items := make([]DataMigrationService, 0) + + resp, err := c.ServicesList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ServicesListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/method_serviceslistbyresourcegroup.go b/resource-manager/datamigration/2025-06-30/serviceresource/method_serviceslistbyresourcegroup.go new file mode 100644 index 00000000000..79f192d0dae --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/method_serviceslistbyresourcegroup.go @@ -0,0 +1,105 @@ +package serviceresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]DataMigrationService +} + +type ServicesListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []DataMigrationService +} + +type ServicesListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ServicesListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ServicesListByResourceGroup ... 
+func (c ServiceResourceClient) ServicesListByResourceGroup(ctx context.Context, id SubscriptionResourceGroupId) (result ServicesListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ServicesListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/services", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]DataMigrationService `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ServicesListByResourceGroupComplete retrieves all the results into a single object +func (c ServiceResourceClient) ServicesListByResourceGroupComplete(ctx context.Context, id SubscriptionResourceGroupId) (ServicesListByResourceGroupCompleteResult, error) { + return c.ServicesListByResourceGroupCompleteMatchingPredicate(ctx, id, DataMigrationServiceOperationPredicate{}) +} + +// ServicesListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c ServiceResourceClient) ServicesListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id SubscriptionResourceGroupId, predicate DataMigrationServiceOperationPredicate) (result ServicesListByResourceGroupCompleteResult, err error) { + items := make([]DataMigrationService, 0) + + resp, err := c.ServicesListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items 
= append(items, v) + } + } + } + + result = ServicesListByResourceGroupCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/method_serviceslistskus.go b/resource-manager/datamigration/2025-06-30/serviceresource/method_serviceslistskus.go new file mode 100644 index 00000000000..3275bc11ba2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/method_serviceslistskus.go @@ -0,0 +1,105 @@ +package serviceresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesListSkusOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]AvailableServiceSku +} + +type ServicesListSkusCompleteResult struct { + LatestHttpResponse *http.Response + Items []AvailableServiceSku +} + +type ServicesListSkusCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ServicesListSkusCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ServicesListSkus ... 
+func (c ServiceResourceClient) ServicesListSkus(ctx context.Context, id ServiceId) (result ServicesListSkusOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ServicesListSkusCustomPager{}, + Path: fmt.Sprintf("%s/skus", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]AvailableServiceSku `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ServicesListSkusComplete retrieves all the results into a single object +func (c ServiceResourceClient) ServicesListSkusComplete(ctx context.Context, id ServiceId) (ServicesListSkusCompleteResult, error) { + return c.ServicesListSkusCompleteMatchingPredicate(ctx, id, AvailableServiceSkuOperationPredicate{}) +} + +// ServicesListSkusCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c ServiceResourceClient) ServicesListSkusCompleteMatchingPredicate(ctx context.Context, id ServiceId, predicate AvailableServiceSkuOperationPredicate) (result ServicesListSkusCompleteResult, err error) { + items := make([]AvailableServiceSku, 0) + + resp, err := c.ServicesListSkus(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ServicesListSkusCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git 
a/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesstart.go b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesstart.go new file mode 100644 index 00000000000..df1b1ea6856 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesstart.go @@ -0,0 +1,70 @@ +package serviceresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesStartOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// ServicesStart ... +func (c ServiceResourceClient) ServicesStart(ctx context.Context, id ServiceId) (result ServicesStartOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/start", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesStartThenPoll performs ServicesStart then polls until it's completed +func (c ServiceResourceClient) ServicesStartThenPoll(ctx context.Context, id ServiceId) error { + result, err := c.ServicesStart(ctx, id) + if err != nil { + return fmt.Errorf("performing ServicesStart: %+v", err) + } + + 
if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesStart: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesstop.go b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesstop.go new file mode 100644 index 00000000000..7b7efe8691e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesstop.go @@ -0,0 +1,70 @@ +package serviceresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesStopOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// ServicesStop ... 
+func (c ServiceResourceClient) ServicesStop(ctx context.Context, id ServiceId) (result ServicesStopOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/stop", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesStopThenPoll performs ServicesStop then polls until it's completed +func (c ServiceResourceClient) ServicesStopThenPoll(ctx context.Context, id ServiceId) error { + result, err := c.ServicesStop(ctx, id) + if err != nil { + return fmt.Errorf("performing ServicesStop: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesStop: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesupdate.go b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesupdate.go new file mode 100644 index 00000000000..a58bf26ecc8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicesupdate.go @@ -0,0 +1,75 @@ +package serviceresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServicesUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DataMigrationService +} + +// ServicesUpdate ... +func (c ServiceResourceClient) ServicesUpdate(ctx context.Context, id ServiceId, input DataMigrationService) (result ServicesUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesUpdateThenPoll performs ServicesUpdate then polls until it's completed +func (c ServiceResourceClient) ServicesUpdateThenPoll(ctx context.Context, id ServiceId, input DataMigrationService) error { + result, err := c.ServicesUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing ServicesUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/method_servicetaskslist.go b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicetaskslist.go new file mode 100644 index 00000000000..2909fd64be7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/method_servicetaskslist.go @@ -0,0 +1,134 @@ +package serviceresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServiceTasksListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]ProjectTask +} + +type ServiceTasksListCompleteResult struct { + LatestHttpResponse *http.Response + Items []ProjectTask +} + +type ServiceTasksListOperationOptions struct { + TaskType *string +} + +func DefaultServiceTasksListOperationOptions() ServiceTasksListOperationOptions { + return ServiceTasksListOperationOptions{} +} + +func (o ServiceTasksListOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ServiceTasksListOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ServiceTasksListOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.TaskType != nil { + out.Append("taskType", fmt.Sprintf("%v", *o.TaskType)) + } + return &out +} + +type ServiceTasksListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ServiceTasksListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ServiceTasksList ... 
+func (c ServiceResourceClient) ServiceTasksList(ctx context.Context, id ServiceId, options ServiceTasksListOperationOptions) (result ServiceTasksListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Pager: &ServiceTasksListCustomPager{}, + Path: fmt.Sprintf("%s/serviceTasks", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]ProjectTask `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ServiceTasksListComplete retrieves all the results into a single object +func (c ServiceResourceClient) ServiceTasksListComplete(ctx context.Context, id ServiceId, options ServiceTasksListOperationOptions) (ServiceTasksListCompleteResult, error) { + return c.ServiceTasksListCompleteMatchingPredicate(ctx, id, options, ProjectTaskOperationPredicate{}) +} + +// ServiceTasksListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c ServiceResourceClient) ServiceTasksListCompleteMatchingPredicate(ctx context.Context, id ServiceId, options ServiceTasksListOperationOptions, predicate ProjectTaskOperationPredicate) (result ServiceTasksListCompleteResult, err error) { + items := make([]ProjectTask, 0) + + resp, err := c.ServiceTasksList(ctx, id, options) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = 
ServiceTasksListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/method_taskslist.go b/resource-manager/datamigration/2025-06-30/serviceresource/method_taskslist.go new file mode 100644 index 00000000000..6628f689637 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/method_taskslist.go @@ -0,0 +1,134 @@ +package serviceresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]ProjectTask +} + +type TasksListCompleteResult struct { + LatestHttpResponse *http.Response + Items []ProjectTask +} + +type TasksListOperationOptions struct { + TaskType *string +} + +func DefaultTasksListOperationOptions() TasksListOperationOptions { + return TasksListOperationOptions{} +} + +func (o TasksListOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o TasksListOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o TasksListOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.TaskType != nil { + out.Append("taskType", fmt.Sprintf("%v", *o.TaskType)) + } + return &out +} + +type TasksListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *TasksListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// TasksList ... 
+func (c ServiceResourceClient) TasksList(ctx context.Context, id ProjectId, options TasksListOperationOptions) (result TasksListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Pager: &TasksListCustomPager{}, + Path: fmt.Sprintf("%s/tasks", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]ProjectTask `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// TasksListComplete retrieves all the results into a single object +func (c ServiceResourceClient) TasksListComplete(ctx context.Context, id ProjectId, options TasksListOperationOptions) (TasksListCompleteResult, error) { + return c.TasksListCompleteMatchingPredicate(ctx, id, options, ProjectTaskOperationPredicate{}) +} + +// TasksListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c ServiceResourceClient) TasksListCompleteMatchingPredicate(ctx context.Context, id ProjectId, options TasksListOperationOptions, predicate ProjectTaskOperationPredicate) (result TasksListCompleteResult, err error) { + items := make([]ProjectTask, 0) + + resp, err := c.TasksList(ctx, id, options) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = TasksListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git 
a/resource-manager/datamigration/2025-06-30/serviceresource/model_availableservicesku.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_availableservicesku.go new file mode 100644 index 00000000000..7fc9caf0792 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_availableservicesku.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AvailableServiceSku struct { + Capacity *AvailableServiceSkuCapacity `json:"capacity,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` + Sku *AvailableServiceSkuSku `json:"sku,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_availableserviceskucapacity.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_availableserviceskucapacity.go new file mode 100644 index 00000000000..7b2aafe00d0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_availableserviceskucapacity.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AvailableServiceSkuCapacity struct { + Default *int64 `json:"default,omitempty"` + Maximum *int64 `json:"maximum,omitempty"` + Minimum *int64 `json:"minimum,omitempty"` + ScaleType *ServiceScalability `json:"scaleType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_availableserviceskusku.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_availableserviceskusku.go new file mode 100644 index 00000000000..d40ccab7baf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_availableserviceskusku.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AvailableServiceSkuSku struct { + Family *string `json:"family,omitempty"` + Name *string `json:"name,omitempty"` + Size *string `json:"size,omitempty"` + Tier *string `json:"tier,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_azureactivedirectoryapp.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_azureactivedirectoryapp.go new file mode 100644 index 00000000000..cf11f72bf64 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_azureactivedirectoryapp.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AzureActiveDirectoryApp struct { + AppKey *string `json:"appKey,omitempty"` + ApplicationId *string `json:"applicationId,omitempty"` + IgnoreAzurePermissions *bool `json:"ignoreAzurePermissions,omitempty"` + TenantId *string `json:"tenantId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_backupfileinfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_backupfileinfo.go new file mode 100644 index 00000000000..f84cee9b6aa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_backupfileinfo.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupFileInfo struct { + FamilySequenceNumber *int64 `json:"familySequenceNumber,omitempty"` + FileLocation *string `json:"fileLocation,omitempty"` + Status *BackupFileStatus `json:"status,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_backupsetinfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_backupsetinfo.go new file mode 100644 index 00000000000..a7b98966641 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_backupsetinfo.go @@ -0,0 +1,59 @@ +package serviceresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BackupSetInfo struct { + BackupFinishedDate *string `json:"backupFinishedDate,omitempty"` + BackupSetId *string `json:"backupSetId,omitempty"` + BackupStartDate *string `json:"backupStartDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FirstLsn *string `json:"firstLsn,omitempty"` + IsBackupRestored *bool `json:"isBackupRestored,omitempty"` + LastLsn *string `json:"lastLsn,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + ListOfBackupFiles *[]BackupFileInfo `json:"listOfBackupFiles,omitempty"` +} + +func (o *BackupSetInfo) GetBackupFinishedDateAsTime() (*time.Time, error) { + if o.BackupFinishedDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishedDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupFinishedDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishedDate = &formatted +} + +func (o *BackupSetInfo) GetBackupStartDateAsTime() (*time.Time, error) { + if o.BackupStartDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupStartDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupStartDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupStartDate = &formatted +} + +func (o *BackupSetInfo) GetLastModifiedTimeAsTime() (*time.Time, error) { + if o.LastModifiedTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastModifiedTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetLastModifiedTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModifiedTime = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_blobshare.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_blobshare.go new file mode 100644 index 00000000000..54e615a6521 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/serviceresource/model_blobshare.go @@ -0,0 +1,8 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BlobShare struct { + SasUri *string `json:"sasUri,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_commandproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_commandproperties.go new file mode 100644 index 00000000000..2ebca6e18e0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_commandproperties.go @@ -0,0 +1,85 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CommandProperties interface { + CommandProperties() BaseCommandPropertiesImpl +} + +var _ CommandProperties = BaseCommandPropertiesImpl{} + +type BaseCommandPropertiesImpl struct { + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s BaseCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s +} + +var _ CommandProperties = RawCommandPropertiesImpl{} + +// RawCommandPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawCommandPropertiesImpl struct { + commandProperties BaseCommandPropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s.commandProperties +} + +func UnmarshalCommandPropertiesImplementation(input []byte) (CommandProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling CommandProperties into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["commandType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureDbSqlMi.Complete") { + var out MigrateMISyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMISyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Sync.Complete.Database") { + var out MigrateSyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + var parent BaseCommandPropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseCommandPropertiesImpl: %+v", err) + } + + return RawCommandPropertiesImpl{ + commandProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connectioninfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connectioninfo.go new file mode 100644 index 00000000000..3047b7e0a50 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connectioninfo.go @@ -0,0 +1,117 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectionInfo interface { + ConnectionInfo() BaseConnectionInfoImpl +} + +var _ ConnectionInfo = BaseConnectionInfoImpl{} + +type BaseConnectionInfoImpl struct { + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s BaseConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s +} + +var _ ConnectionInfo = RawConnectionInfoImpl{} + +// RawConnectionInfoImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectionInfoImpl struct { + connectionInfo BaseConnectionInfoImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s.connectionInfo +} + +func UnmarshalConnectionInfoImplementation(input []byte) (ConnectionInfo, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectionInfo into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["type"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MiSqlConnectionInfo") { + var out MiSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MiSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MongoDbConnectionInfo") { + var out MongoDbConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbConnectionInfo: %+v", err) + } + 
return out, nil + } + + if strings.EqualFold(value, "MySqlConnectionInfo") { + var out MySqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MySqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "OracleConnectionInfo") { + var out OracleConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into OracleConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "PostgreSqlConnectionInfo") { + var out PostgreSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into PostgreSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SqlConnectionInfo") { + var out SqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into SqlConnectionInfo: %+v", err) + } + return out, nil + } + + var parent BaseConnectionInfoImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectionInfoImpl: %+v", err) + } + + return RawConnectionInfoImpl{ + connectionInfo: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttomongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttomongodbtaskproperties.go new file mode 100644 index 00000000000..4bb10699bdc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttomongodbtaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToMongoDbTaskProperties{} + +type ConnectToMongoDbTaskProperties struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToMongoDbTaskProperties{} + +func (s ConnectToMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Connect.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToMongoDbTaskProperties{} + +func (s *ConnectToMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType 
`json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourcemysqltaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourcemysqltaskinput.go new file mode 100644 index 00000000000..05582aa01af --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourcemysqltaskinput.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceMySqlTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetPlatform *MySqlTargetPlatformType `json:"targetPlatform,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourcemysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourcemysqltaskproperties.go new file mode 100644 index 00000000000..3d72552dfba --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourcemysqltaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceMySqlTaskProperties{} + +type ConnectToSourceMySqlTaskProperties struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceMySqlTaskProperties{} + +func (s ConnectToSourceMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper 
ConnectToSourceMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.MySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceMySqlTaskProperties{} + +func (s *ConnectToSourceMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return 
fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourcenonsqltaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourcenonsqltaskoutput.go new file mode 100644 index 00000000000..92ae3685bab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourcenonsqltaskoutput.go @@ -0,0 +1,12 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceNonSqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerProperties *ServerProperties `json:"serverProperties,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourceoraclesynctaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourceoraclesynctaskinput.go new file mode 100644 index 00000000000..fd7abcee46b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourceoraclesynctaskinput.go @@ -0,0 +1,8 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceOracleSyncTaskInput struct { + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourceoraclesynctaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourceoraclesynctaskoutput.go new file mode 100644 index 00000000000..8c6070529d9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourceoraclesynctaskoutput.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceOracleSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourceoraclesynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourceoraclesynctaskproperties.go new file mode 100644 index 00000000000..bead2092398 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourceoraclesynctaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// ConnectToSourceOracleSyncTaskProperties is the ProjectTaskProperties
// implementation for the "ConnectToSource.Oracle.Sync" task type.
var _ ProjectTaskProperties = ConnectToSourceOracleSyncTaskProperties{}

type ConnectToSourceOracleSyncTaskProperties struct {
	// Input is the caller-supplied task input; Output is populated by the service.
	Input  *ConnectToSourceOracleSyncTaskInput    `json:"input,omitempty"`
	Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the base-type view of this task, satisfying
// the ProjectTaskProperties interface.
func (s ConnectToSourceOracleSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToSourceOracleSyncTaskProperties{}

// MarshalJSON serializes the struct, then forces the "taskType" discriminator
// to this type's fixed wire value via a map round-trip so the payload is
// correct regardless of what the TaskType field holds.
func (s ConnectToSourceOracleSyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The local wrapper type drops this MarshalJSON method, avoiding recursion.
	type wrapper ConnectToSourceOracleSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties: %+v", err)
	}

	decoded["taskType"] = "ConnectToSource.Oracle.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToSourceOracleSyncTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then re-reads the raw
// payload to decode the polymorphic Commands elements through their
// discriminator-aware unmarshaler.
func (s *ConnectToSourceOracleSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *ConnectToSourceOracleSyncTaskInput    `json:"input,omitempty"`
		Output     *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                     `json:"clientData,omitempty"`
		Errors     *[]ODataError                          `json:"errors,omitempty"`
		State      *TaskState                             `json:"state,omitempty"`
		TaskType   TaskType                               `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: keep each element raw so the discriminated Commands union
	// can be decoded element-by-element.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceOracleSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ConnectToSourcePostgreSqlSyncTaskInput is the request payload for the
// "ConnectToSource.PostgreSql.Sync" connectivity-check task.
type ConnectToSourcePostgreSqlSyncTaskInput struct {
	// SourceConnectionInfo identifies the PostgreSQL source server to connect to.
	SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"`
}

// ConnectToSourcePostgreSqlSyncTaskOutput is the service-populated output of
// the "ConnectToSource.PostgreSql.Sync" task.
type ConnectToSourcePostgreSqlSyncTaskOutput struct {
	// Databases lists the database names discovered on the source server.
	Databases *[]string `json:"databases,omitempty"`
	Id *string `json:"id,omitempty"`
	SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"`
	SourceServerVersion *string `json:"sourceServerVersion,omitempty"`
	// ValidationErrors lists any connectivity/validation failures.
	ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"`
}
// ConnectToSourcePostgreSqlSyncTaskProperties is the ProjectTaskProperties
// implementation for the "ConnectToSource.PostgreSql.Sync" task type.
var _ ProjectTaskProperties = ConnectToSourcePostgreSqlSyncTaskProperties{}

type ConnectToSourcePostgreSqlSyncTaskProperties struct {
	// Input is the caller-supplied task input; Output is populated by the service.
	Input  *ConnectToSourcePostgreSqlSyncTaskInput    `json:"input,omitempty"`
	Output *[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the base-type view of this task, satisfying
// the ProjectTaskProperties interface.
func (s ConnectToSourcePostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToSourcePostgreSqlSyncTaskProperties{}

// MarshalJSON serializes the struct, then forces the "taskType" discriminator
// to this type's fixed wire value via a map round-trip.
func (s ConnectToSourcePostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The local wrapper type drops this MarshalJSON method, avoiding recursion.
	type wrapper ConnectToSourcePostgreSqlSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err)
	}

	decoded["taskType"] = "ConnectToSource.PostgreSql.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToSourcePostgreSqlSyncTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then re-reads the raw
// payload to decode the polymorphic Commands elements through their
// discriminator-aware unmarshaler.
func (s *ConnectToSourcePostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *ConnectToSourcePostgreSqlSyncTaskInput    `json:"input,omitempty"`
		Output     *[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                         `json:"clientData,omitempty"`
		Errors     *[]ODataError                              `json:"errors,omitempty"`
		State      *TaskState                                 `json:"state,omitempty"`
		TaskType   TaskType                                   `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: keep each element raw so the discriminated Commands union
	// can be decoded element-by-element.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourcePostgreSqlSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
// ConnectToSourceSqlServerSyncTaskProperties is the ProjectTaskProperties
// implementation for the "ConnectToSource.SqlServer.Sync" task type. It
// shares the SQL Server task input/output models with the non-sync variant.
var _ ProjectTaskProperties = ConnectToSourceSqlServerSyncTaskProperties{}

type ConnectToSourceSqlServerSyncTaskProperties struct {
	// Input is the caller-supplied task input; Output is populated by the
	// service and is itself a discriminated union (per-level result types).
	Input  *ConnectToSourceSqlServerTaskInput    `json:"input,omitempty"`
	Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the base-type view of this task, satisfying
// the ProjectTaskProperties interface.
func (s ConnectToSourceSqlServerSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToSourceSqlServerSyncTaskProperties{}

// MarshalJSON serializes the struct, then forces the "taskType" discriminator
// to this type's fixed wire value via a map round-trip.
func (s ConnectToSourceSqlServerSyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The local wrapper type drops this MarshalJSON method, avoiding recursion.
	type wrapper ConnectToSourceSqlServerSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err)
	}

	decoded["taskType"] = "ConnectToSource.SqlServer.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToSourceSqlServerSyncTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then re-reads the raw
// payload to decode the two polymorphic collections (Commands and Output)
// through their discriminator-aware unmarshalers.
func (s *ConnectToSourceSqlServerSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"`
		ClientData *map[string]string                 `json:"clientData,omitempty"`
		Errors     *[]ODataError                      `json:"errors,omitempty"`
		State      *TaskState                         `json:"state,omitempty"`
		TaskType   TaskType                           `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: keep each element raw so the discriminated unions can be
	// decoded element-by-element.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]ConnectToSourceSqlServerTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ConnectToSourceSqlServerTaskInput is the request payload for the SQL Server
// "connect to source" tasks. The Collect* flags select which categories of
// server objects the task inventories.
type ConnectToSourceSqlServerTaskInput struct {
	// CheckPermissionsGroup selects the server-level permission set to verify.
	CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"`
	CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"`
	CollectDatabases *bool `json:"collectDatabases,omitempty"`
	CollectLogins *bool `json:"collectLogins,omitempty"`
	CollectTdeCertificateInfo *bool `json:"collectTdeCertificateInfo,omitempty"`
	// EncryptedKeyForSecureFields carries the key used to protect secure
	// fields in transit — presumably service-managed; confirm against the API spec.
	EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"`
	// SourceConnectionInfo identifies the SQL Server source to connect to (required).
	SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"`
	ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"`
}
+ +type ConnectToSourceSqlServerTaskOutput interface { + ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl +} + +var _ ConnectToSourceSqlServerTaskOutput = BaseConnectToSourceSqlServerTaskOutputImpl{} + +type BaseConnectToSourceSqlServerTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s +} + +var _ ConnectToSourceSqlServerTaskOutput = RawConnectToSourceSqlServerTaskOutputImpl{} + +// RawConnectToSourceSqlServerTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectToSourceSqlServerTaskOutputImpl struct { + connectToSourceSqlServerTaskOutput BaseConnectToSourceSqlServerTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s.connectToSourceSqlServerTaskOutput +} + +func UnmarshalConnectToSourceSqlServerTaskOutputImplementation(input []byte) (ConnectToSourceSqlServerTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TaskLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputTaskLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + return out, nil + } + + var parent BaseConnectToSourceSqlServerTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectToSourceSqlServerTaskOutputImpl: %+v", err) + } + + return RawConnectToSourceSqlServerTaskOutputImpl{ + connectToSourceSqlServerTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourcesqlservertaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourcesqlservertaskoutputagentjoblevel.go new file mode 100644 index 00000000000..f906299dcce --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttosourcesqlservertaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// ConnectToSourceSqlServerTaskOutputAgentJobLevel is the per-agent-job output
// of a SQL Server "connect to source" task (resultType "AgentJobLevelOutput").
var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputAgentJobLevel{}

type ConnectToSourceSqlServerTaskOutputAgentJobLevel struct {
	IsEnabled *bool `json:"isEnabled,omitempty"`
	JobCategory *string `json:"jobCategory,omitempty"`
	JobOwner *string `json:"jobOwner,omitempty"`
	// LastExecutedOn is a timestamp string; format is service-defined.
	LastExecutedOn *string `json:"lastExecutedOn,omitempty"`
	// MigrationEligibility indicates whether this job can be migrated.
	MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"`
	Name *string `json:"name,omitempty"`
	ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"`

	// Fields inherited from ConnectToSourceSqlServerTaskOutput

	Id *string `json:"id,omitempty"`
	ResultType string `json:"resultType"`
}

// ConnectToSourceSqlServerTaskOutput returns the shared base-type view,
// satisfying the discriminated-union interface.
func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl {
	return BaseConnectToSourceSqlServerTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputAgentJobLevel{}

// MarshalJSON serializes the struct, then forces the "resultType"
// discriminator to this type's fixed wire value via a map round-trip.
func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) {
	// The local wrapper type drops this MarshalJSON method, avoiding recursion.
	type wrapper ConnectToSourceSqlServerTaskOutputAgentJobLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err)
	}

	decoded["resultType"] = "AgentJobLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err)
	}

	return encoded, nil
}
// ConnectToSourceSqlServerTaskOutputDatabaseLevel is the per-database output
// of a SQL Server "connect to source" task (resultType "DatabaseLevelOutput").
var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputDatabaseLevel{}

type ConnectToSourceSqlServerTaskOutputDatabaseLevel struct {
	CompatibilityLevel *DatabaseCompatLevel `json:"compatibilityLevel,omitempty"`
	DatabaseFiles *[]DatabaseFileInfo `json:"databaseFiles,omitempty"`
	DatabaseState *DatabaseState `json:"databaseState,omitempty"`
	Name *string `json:"name,omitempty"`
	// SizeMB is the database size in megabytes.
	SizeMB *float64 `json:"sizeMB,omitempty"`

	// Fields inherited from ConnectToSourceSqlServerTaskOutput

	Id *string `json:"id,omitempty"`
	ResultType string `json:"resultType"`
}

// ConnectToSourceSqlServerTaskOutput returns the shared base-type view,
// satisfying the discriminated-union interface.
func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl {
	return BaseConnectToSourceSqlServerTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputDatabaseLevel{}

// MarshalJSON serializes the struct, then forces the "resultType"
// discriminator to this type's fixed wire value via a map round-trip.
func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) {
	// The local wrapper type drops this MarshalJSON method, avoiding recursion.
	type wrapper ConnectToSourceSqlServerTaskOutputDatabaseLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err)
	}

	decoded["resultType"] = "DatabaseLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err)
	}

	return encoded, nil
}

// ConnectToSourceSqlServerTaskOutputLoginLevel is the per-login output of a
// SQL Server "connect to source" task (resultType "LoginLevelOutput").
var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputLoginLevel{}

type ConnectToSourceSqlServerTaskOutputLoginLevel struct {
	DefaultDatabase *string `json:"defaultDatabase,omitempty"`
	IsEnabled *bool `json:"isEnabled,omitempty"`
	LoginType *LoginType `json:"loginType,omitempty"`
	// MigrationEligibility indicates whether this login can be migrated.
	MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"`
	Name *string `json:"name,omitempty"`

	// Fields inherited from ConnectToSourceSqlServerTaskOutput

	Id *string `json:"id,omitempty"`
	ResultType string `json:"resultType"`
}

// ConnectToSourceSqlServerTaskOutput returns the shared base-type view,
// satisfying the discriminated-union interface.
func (s ConnectToSourceSqlServerTaskOutputLoginLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl {
	return BaseConnectToSourceSqlServerTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputLoginLevel{}

// MarshalJSON serializes the struct, then forces the "resultType"
// discriminator to this type's fixed wire value via a map round-trip.
func (s ConnectToSourceSqlServerTaskOutputLoginLevel) MarshalJSON() ([]byte, error) {
	// The local wrapper type drops this MarshalJSON method, avoiding recursion.
	type wrapper ConnectToSourceSqlServerTaskOutputLoginLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err)
	}

	decoded["resultType"] = "LoginLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err)
	}

	return encoded, nil
}
// ConnectToSourceSqlServerTaskOutputTaskLevel is the task-level summary
// output of a SQL Server "connect to source" task (resultType
// "TaskLevelOutput"). The map fields appear to key object names to
// serialized detail blobs — confirm exact value format against the API spec.
var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputTaskLevel{}

type ConnectToSourceSqlServerTaskOutputTaskLevel struct {
	AgentJobs *map[string]string `json:"agentJobs,omitempty"`
	DatabaseTdeCertificateMapping *map[string]string `json:"databaseTdeCertificateMapping,omitempty"`
	Databases *map[string]string `json:"databases,omitempty"`
	Logins *map[string]string `json:"logins,omitempty"`
	SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"`
	SourceServerVersion *string `json:"sourceServerVersion,omitempty"`
	ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"`

	// Fields inherited from ConnectToSourceSqlServerTaskOutput

	Id *string `json:"id,omitempty"`
	ResultType string `json:"resultType"`
}

// ConnectToSourceSqlServerTaskOutput returns the shared base-type view,
// satisfying the discriminated-union interface.
func (s ConnectToSourceSqlServerTaskOutputTaskLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl {
	return BaseConnectToSourceSqlServerTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputTaskLevel{}

// MarshalJSON serializes the struct, then forces the "resultType"
// discriminator to this type's fixed wire value via a map round-trip.
func (s ConnectToSourceSqlServerTaskOutputTaskLevel) MarshalJSON() ([]byte, error) {
	// The local wrapper type drops this MarshalJSON method, avoiding recursion.
	type wrapper ConnectToSourceSqlServerTaskOutputTaskLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err)
	}

	decoded["resultType"] = "TaskLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err)
	}

	return encoded, nil
}
// ConnectToSourceSqlServerTaskProperties is the ProjectTaskProperties
// implementation for the "ConnectToSource.SqlServer" task type.
var _ ProjectTaskProperties = ConnectToSourceSqlServerTaskProperties{}

type ConnectToSourceSqlServerTaskProperties struct {
	// Input is the caller-supplied task input; Output is populated by the
	// service and is a discriminated union (per-level result types).
	Input  *ConnectToSourceSqlServerTaskInput    `json:"input,omitempty"`
	Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"`
	// TaskId is the service-assigned identifier of the task.
	TaskId *string `json:"taskId,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the base-type view of this task, satisfying
// the ProjectTaskProperties interface.
func (s ConnectToSourceSqlServerTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToSourceSqlServerTaskProperties{}

// MarshalJSON serializes the struct, then forces the "taskType" discriminator
// to this type's fixed wire value via a map round-trip.
func (s ConnectToSourceSqlServerTaskProperties) MarshalJSON() ([]byte, error) {
	// The local wrapper type drops this MarshalJSON method, avoiding recursion.
	type wrapper ConnectToSourceSqlServerTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties: %+v", err)
	}

	decoded["taskType"] = "ConnectToSource.SqlServer"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToSourceSqlServerTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then re-reads the raw
// payload to decode the two polymorphic collections (Commands and Output)
// through their discriminator-aware unmarshalers.
func (s *ConnectToSourceSqlServerTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"`
		TaskId     *string                            `json:"taskId,omitempty"`
		ClientData *map[string]string                 `json:"clientData,omitempty"`
		Errors     *[]ODataError                      `json:"errors,omitempty"`
		State      *TaskState                         `json:"state,omitempty"`
		TaskType   TaskType                           `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.TaskId = decoded.TaskId
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: keep each element raw so the discriminated unions can be
	// decoded element-by-element.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]ConnectToSourceSqlServerTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}

// ConnectToTargetAzureDbForMySqlTaskInput is the request payload for the
// "connect to target Azure DB for MySQL" task.
type ConnectToTargetAzureDbForMySqlTaskInput struct {
	// IsOfflineMigration indicates the migration is offline rather than online/sync.
	IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"`
	// SourceConnectionInfo identifies the MySQL source server (required).
	SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"`
	// TargetConnectionInfo identifies the Azure DB for MySQL target (required).
	TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"`
}
+ +type ConnectToTargetAzureDbForMySqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetazuredbformysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetazuredbformysqltaskproperties.go new file mode 100644 index 00000000000..7e6a5560372 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetazuredbformysqltaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetAzureDbForMySqlTaskProperties{} + +type ConnectToTargetAzureDbForMySqlTaskProperties struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s *ConnectToTargetAzureDbForMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + 
Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..9dfa5d6bf74 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForPostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..eea1f392637 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..f4b641dca45 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..b9dae40d076 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,8 
@@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput struct { + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..3e234f30816 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput struct { + DatabaseSchemaMap *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined `json:"databaseSchemaMap,omitempty"` + Databases *[]string `json:"databases,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go new file mode 100644 index 00000000000..bf8ddadda3e --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined struct { + Database *string `json:"database,omitempty"` + Schemas *[]string `json:"schemas,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..5bb22165048 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + 
+func (s *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqldbtaskinput.go new file mode 100644 index 00000000000..f07d548eaef --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqldbtaskinput.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlDbTaskInput struct { + QueryObjectCounts *bool `json:"queryObjectCounts,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqldbtaskoutput.go new file mode 100644 index 00000000000..e029b2b4398 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqldbtaskoutput.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlDbTaskOutput struct { + Databases *map[string]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqldbtaskproperties.go new file mode 100644 index 00000000000..512ae5b62be --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqldbtaskproperties.go @@ -0,0 +1,109 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetSqlDbTaskProperties{} + +type ConnectToTargetSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlDbTaskProperties{} + +func (s ConnectToTargetSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlDbTaskProperties{} + +func (s *ConnectToTargetSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput 
`json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmisynctaskinput.go new file mode 100644 index 00000000000..e7028d76d73 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmisynctaskinput.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmisynctaskoutput.go new file mode 100644 index 00000000000..e44dd125bc6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlMISyncTaskOutput struct { + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmisynctaskproperties.go new file mode 100644 index 00000000000..212557497ee --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmisynctaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetSqlMISyncTaskProperties{} + +type ConnectToTargetSqlMISyncTaskProperties struct { + Input *ConnectToTargetSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlMISyncTaskProperties{} + +func (s ConnectToTargetSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlMISyncTaskProperties{} + +func (s *ConnectToTargetSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData 
*map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmitaskinput.go new file mode 100644 index 00000000000..2067b0b3c0a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmitaskinput.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlMITaskInput struct { + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmitaskoutput.go new file mode 100644 index 00000000000..575405e870e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmitaskoutput.go @@ -0,0 +1,13 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlMITaskOutput struct { + AgentJobs *[]string `json:"agentJobs,omitempty"` + Id *string `json:"id,omitempty"` + Logins *[]string `json:"logins,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmitaskproperties.go new file mode 100644 index 00000000000..fd64139cfe4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlmitaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetSqlMITaskProperties{} + +type ConnectToTargetSqlMITaskProperties struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlMITaskProperties{} + +func (s ConnectToTargetSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlMITaskProperties{} + +func (s *ConnectToTargetSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlsqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlsqldbsynctaskinput.go new file mode 100644 index 00000000000..d0722818c00 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlsqldbsynctaskinput.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlSqlDbSyncTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlsqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlsqldbsynctaskproperties.go new file mode 100644 index 00000000000..9a388bcb78a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_connecttotargetsqlsqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +type ConnectToTargetSqlSqlDbSyncTaskProperties struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err 
!= nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s *ConnectToTargetSqlSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 
'ConnectToTargetSqlSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_databasebackupinfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_databasebackupinfo.go new file mode 100644 index 00000000000..0ae14cc0724 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_databasebackupinfo.go @@ -0,0 +1,33 @@ +package serviceresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseBackupInfo struct { + BackupFiles *[]string `json:"backupFiles,omitempty"` + BackupFinishDate *string `json:"backupFinishDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FamilyCount *int64 `json:"familyCount,omitempty"` + IsCompressed *bool `json:"isCompressed,omitempty"` + IsDamaged *bool `json:"isDamaged,omitempty"` + Position *int64 `json:"position,omitempty"` +} + +func (o *DatabaseBackupInfo) GetBackupFinishDateAsTime() (*time.Time, error) { + if o.BackupFinishDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseBackupInfo) SetBackupFinishDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishDate = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_databasefileinfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_databasefileinfo.go new file mode 100644 index 00000000000..356fbadeac2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_databasefileinfo.go @@ -0,0 +1,14 @@ +package 
serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseFileInfo struct { + DatabaseName *string `json:"databaseName,omitempty"` + FileType *DatabaseFileType `json:"fileType,omitempty"` + Id *string `json:"id,omitempty"` + LogicalName *string `json:"logicalName,omitempty"` + PhysicalFullName *string `json:"physicalFullName,omitempty"` + RestoreFullName *string `json:"restoreFullName,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_databasesummaryresult.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_databasesummaryresult.go new file mode 100644 index 00000000000..0c37373d905 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_databasesummaryresult.go @@ -0,0 +1,47 @@ +package serviceresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_databasetable.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_databasetable.go new file mode 100644 index 00000000000..31479df4f85 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_databasetable.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseTable struct { + HasRows *bool `json:"hasRows,omitempty"` + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_dataintegrityvalidationresult.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_dataintegrityvalidationresult.go new file mode 100644 index 00000000000..e0c30e97b54 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_dataintegrityvalidationresult.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataIntegrityValidationResult struct { + FailedObjects *map[string]string `json:"failedObjects,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_dataitemmigrationsummaryresult.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_dataitemmigrationsummaryresult.go new file mode 100644 index 00000000000..e8e51116317 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_dataitemmigrationsummaryresult.go @@ -0,0 +1,46 @@ +package serviceresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataItemMigrationSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DataItemMigrationSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DataItemMigrationSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_datamigrationservice.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_datamigrationservice.go new file mode 100644 index 00000000000..2dbba2c0cb8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_datamigrationservice.go @@ -0,0 +1,21 @@ +package serviceresource + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataMigrationService struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Kind *string `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DataMigrationServiceProperties `json:"properties,omitempty"` + Sku *ServiceSku `json:"sku,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_datamigrationserviceproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_datamigrationserviceproperties.go new file mode 100644 index 00000000000..aa3eacfb4ea --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_datamigrationserviceproperties.go @@ -0,0 +1,13 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataMigrationServiceProperties struct { + AutoStopDelay *string `json:"autoStopDelay,omitempty"` + DeleteResourcesOnStop *bool `json:"deleteResourcesOnStop,omitempty"` + ProvisioningState *ServiceProvisioningState `json:"provisioningState,omitempty"` + PublicKey *string `json:"publicKey,omitempty"` + VirtualNicId *string `json:"virtualNicId,omitempty"` + VirtualSubnetId *string `json:"virtualSubnetId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_datamigrationservicestatusresponse.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_datamigrationservicestatusresponse.go new file mode 100644 index 00000000000..68229aec7ad --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_datamigrationservicestatusresponse.go @@ -0,0 +1,12 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataMigrationServiceStatusResponse struct { + AgentConfiguration *interface{} `json:"agentConfiguration,omitempty"` + AgentVersion *string `json:"agentVersion,omitempty"` + Status *string `json:"status,omitempty"` + SupportedTaskTypes *[]string `json:"supportedTaskTypes,omitempty"` + VMSize *string `json:"vmSize,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_executionstatistics.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_executionstatistics.go new file mode 100644 index 00000000000..f7efbaeae20 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_executionstatistics.go @@ -0,0 +1,13 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ExecutionStatistics struct { + CpuTimeMs *float64 `json:"cpuTimeMs,omitempty"` + ElapsedTimeMs *float64 `json:"elapsedTimeMs,omitempty"` + ExecutionCount *int64 `json:"executionCount,omitempty"` + HasErrors *bool `json:"hasErrors,omitempty"` + SqlErrors *[]string `json:"sqlErrors,omitempty"` + WaitStats *map[string]WaitStatistics `json:"waitStats,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_fileshare.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_fileshare.go new file mode 100644 index 00000000000..7b81618d1b5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_fileshare.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type FileShare struct { + Password *string `json:"password,omitempty"` + Path string `json:"path"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_gettdecertificatessqltaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_gettdecertificatessqltaskinput.go new file mode 100644 index 00000000000..2f9f3c6a8bc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_gettdecertificatessqltaskinput.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetTdeCertificatesSqlTaskInput struct { + BackupFileShare FileShare `json:"backupFileShare"` + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + SelectedCertificates []SelectedCertificateInput `json:"selectedCertificates"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_gettdecertificatessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_gettdecertificatessqltaskoutput.go new file mode 100644 index 00000000000..46b93a57dda --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_gettdecertificatessqltaskoutput.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetTdeCertificatesSqlTaskOutput struct { + Base64EncodedCertificates *map[string][]string `json:"base64EncodedCertificates,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_gettdecertificatessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_gettdecertificatessqltaskproperties.go new file mode 100644 index 00000000000..37f04c83b6d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_gettdecertificatessqltaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = GetTdeCertificatesSqlTaskProperties{} + +type GetTdeCertificatesSqlTaskProperties struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetTdeCertificatesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetTdeCertificatesSqlTaskProperties{} + +func (s GetTdeCertificatesSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetTdeCertificatesSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetTDECertificates.Sql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetTdeCertificatesSqlTaskProperties{} + +func (s *GetTdeCertificatesSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetTdeCertificatesSqlTaskProperties': %+v", i, err) + } + output = 
append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesmysqltaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesmysqltaskinput.go new file mode 100644 index 00000000000..ffc898bbbbf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesmysqltaskinput.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesMySqlTaskInput struct { + ConnectionInfo MySqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesmysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesmysqltaskoutput.go new file mode 100644 index 00000000000..125ca087942 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesmysqltaskoutput.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesMySqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesmysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesmysqltaskproperties.go new file mode 100644 index 00000000000..879affdb165 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesmysqltaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = GetUserTablesMySqlTaskProperties{} + +type GetUserTablesMySqlTaskProperties struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesMySqlTaskProperties{} + +func (s GetUserTablesMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesMySqlTaskProperties{} + +func (s *GetUserTablesMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + 
s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesoracletaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesoracletaskinput.go new file mode 100644 index 00000000000..8f77833c314 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesoracletaskinput.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesOracleTaskInput struct { + ConnectionInfo OracleConnectionInfo `json:"connectionInfo"` + SelectedSchemas []string `json:"selectedSchemas"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesoracletaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesoracletaskoutput.go new file mode 100644 index 00000000000..38a09adde19 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesoracletaskoutput.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesOracleTaskOutput struct { + SchemaName *string `json:"schemaName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesoracletaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesoracletaskproperties.go new file mode 100644 index 00000000000..20a8e30031c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablesoracletaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = GetUserTablesOracleTaskProperties{} + +type GetUserTablesOracleTaskProperties struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesOracleTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesOracleTaskProperties{} + +func (s GetUserTablesOracleTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesOracleTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling 
GetUserTablesOracleTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesOracle" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesOracleTaskProperties{} + +func (s *GetUserTablesOracleTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesOracleTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = 
&output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablespostgresqltaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablespostgresqltaskinput.go new file mode 100644 index 00000000000..74997550e1e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablespostgresqltaskinput.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesPostgreSqlTaskInput struct { + ConnectionInfo PostgreSqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablespostgresqltaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablespostgresqltaskoutput.go new file mode 100644 index 00000000000..8c4fcdc57e7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablespostgresqltaskoutput.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesPostgreSqlTaskOutput struct { + DatabaseName *string `json:"databaseName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablespostgresqltaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablespostgresqltaskproperties.go new file mode 100644 index 00000000000..1f38adeb83a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablespostgresqltaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = GetUserTablesPostgreSqlTaskProperties{} + +type GetUserTablesPostgreSqlTaskProperties struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesPostgreSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesPostgreSqlTaskProperties{} + +func (s GetUserTablesPostgreSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesPostgreSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if 
err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesPostgreSql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesPostgreSqlTaskProperties{} + +func (s *GetUserTablesPostgreSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 
'GetUserTablesPostgreSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqlsynctaskinput.go new file mode 100644 index 00000000000..ec0886b891b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqlsynctaskinput.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlSyncTaskInput struct { + SelectedSourceDatabases []string `json:"selectedSourceDatabases"` + SelectedTargetDatabases []string `json:"selectedTargetDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqlsynctaskoutput.go new file mode 100644 index 00000000000..39bc3e4fe88 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqlsynctaskoutput.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlSyncTaskOutput struct { + DatabasesToSourceTables *map[string][]DatabaseTable `json:"databasesToSourceTables,omitempty"` + DatabasesToTargetTables *map[string][]DatabaseTable `json:"databasesToTargetTables,omitempty"` + TableValidationErrors *map[string][]string `json:"tableValidationErrors,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqlsynctaskproperties.go new file mode 100644 index 00000000000..0dec8a173af --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlSyncTaskProperties{} + +type GetUserTablesSqlSyncTaskProperties struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlSyncTaskProperties{} + +func (s GetUserTablesSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlSyncTaskProperties{} + +func (s *GetUserTablesSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqltaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqltaskinput.go new file mode 100644 index 00000000000..29b17200b5f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqltaskinput.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlTaskInput struct { + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqltaskoutput.go new file mode 100644 index 00000000000..08d53705517 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqltaskoutput.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqltaskproperties.go new file mode 100644 index 00000000000..a2c890165b1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_getusertablessqltaskproperties.go @@ -0,0 +1,109 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlTaskProperties{} + +type GetUserTablesSqlTaskProperties struct { + Input *GetUserTablesSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlTaskProperties{} + +func (s GetUserTablesSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.Sql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlTaskProperties{} + +func (s *GetUserTablesSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + 
Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemisynccompletecommandinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemisynccompletecommandinput.go new file mode 100644 index 00000000000..735b2fc909b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemisynccompletecommandinput.go @@ -0,0 +1,8 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMISyncCompleteCommandInput struct { + SourceDatabaseName string `json:"sourceDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemisynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemisynccompletecommandoutput.go new file mode 100644 index 00000000000..4116c2922e8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemisynccompletecommandoutput.go @@ -0,0 +1,8 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMISyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemisynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemisynccompletecommandproperties.go new file mode 100644 index 00000000000..b4afad90131 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemisynccompletecommandproperties.go @@ -0,0 +1,55 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ CommandProperties = MigrateMISyncCompleteCommandProperties{} + +type MigrateMISyncCompleteCommandProperties struct { + Input *MigrateMISyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateMISyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateMISyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateMISyncCompleteCommandProperties{} + +func (s MigrateMISyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMISyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.SqlServer.AzureDbSqlMi.Complete" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemongodbtaskproperties.go new file mode 100644 index 00000000000..48f623c314b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemongodbtaskproperties.go @@ -0,0 +1,121 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMongoDbTaskProperties{} + +type MigrateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMongoDbTaskProperties{} + +func (s MigrateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMongoDbTaskProperties{} + +func (s *MigrateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState 
`json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MongoDbProgress, 0) + for i, val := range listTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinedatabaseinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinedatabaseinput.go new file mode 100644 index 00000000000..23f9c78b759 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinedatabaseinput.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlOfflineDatabaseInput struct { + Name *string `json:"name,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskinput.go new file mode 100644 index 00000000000..cea0ebf68d0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskinput.go @@ -0,0 +1,32 @@ +package serviceresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + MakeSourceServerReadOnly *bool `json:"makeSourceServerReadOnly,omitempty"` + OptionalAgentSettings *map[string]string `json:"optionalAgentSettings,omitempty"` + SelectedDatabases []MigrateMySqlAzureDbForMySqlOfflineDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutput.go new file mode 100644 index 00000000000..da42cc25936 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutput.go @@ -0,0 +1,100 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskOutput interface { + MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlOfflineTaskOutput BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlOfflineTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlOfflineTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + 
if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + migrateMySqlAzureDbForMySqlOfflineTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go new file mode 100644 index 00000000000..7dde2d087f7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + var decoded 
map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go new file mode 100644 index 00000000000..a1d8b7eac39 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go @@ -0,0 +1,52 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go new file mode 100644 index 00000000000..e9813e93184 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ 
+package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +func (s 
MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go new file mode 100644 index 00000000000..f710ed66943 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go @@ -0,0 +1,61 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling 
MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskproperties.go new file mode 100644 index 00000000000..e95784b220b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlofflinetaskproperties.go @@ -0,0 +1,127 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlOfflineTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper 
MigrateMySqlAzureDbForMySqlOfflineTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlOfflineTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list 
[]json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlOfflineTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsyncdatabaseinput.go new file mode 100644 index 00000000000..b4471b68159 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsyncdatabaseinput.go @@ -0,0 +1,13 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlSyncDatabaseInput struct { + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskinput.go new file mode 100644 index 00000000000..ab13fdfe61d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskinput.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlSyncTaskInput struct { + SelectedDatabases []MigrateMySqlAzureDbForMySqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutput.go new file mode 100644 index 00000000000..d94bab64de9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlSyncTaskOutput interface { + MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlSyncTaskOutput BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlSyncTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err 
!= nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + migrateMySqlAzureDbForMySqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..e3be4e6b6f6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..d71394cecce --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = 
MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputerror.go new file mode 100644 index 00000000000..eb62e80dc9f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..72c4f437fc1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ +package serviceresource + +import ( + 
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: 
%+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..9926c03eb68 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *string `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *string `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *string `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() 
BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskproperties.go new file mode 100644 index 00000000000..3d9712f2af2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratemysqlazuredbformysqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +type MigrateMySqlAzureDbForMySqlSyncTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput 
`json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..51b86d3a952 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type MigrateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: 
%+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + 
s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateOracleAzureDbPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..2c3e79bf4b7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateOracleAzureDbPostgreSqlSyncDatabaseInput struct { + CaseManipulation *string `json:"caseManipulation,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..ccd2fe4fb16 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskinput.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateOracleAzureDbPostgreSqlSyncTaskInput struct { + SelectedDatabases []MigrateOracleAzureDbPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..a0baa1f50e1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutput interface { + MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +type BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +// RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. 
this cannot be used as a Request Payload). +type RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + migrateOracleAzureDbPostgreSqlSyncTaskOutput BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s.migrateOracleAzureDbPostgreSqlSyncTaskOutput +} + +func UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(input []byte) (MigrateOracleAzureDbPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + migrateOracleAzureDbPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..f534d3ead8f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..e9832b38918 --- 
/dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..8682332631f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..e1d23786f10 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ 
+package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, 
fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..2bd3277e42e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..3afbbd99ca8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go @@ -0,0 +1,14 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]interface{} `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SelectedTables *[]MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput `json:"selectedTables,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go new file mode 100644 index 00000000000..04cb6c5004a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go @@ -0,0 +1,8 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput struct { + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..7770f978646 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,30 @@ +package serviceresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..78254fdd4dc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput interface { + MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +type BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +// RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s.migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput +} + +func UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(input []byte) (MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if 
strings.EqualFold(value, "MigrationLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..a2a89226448 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..dc618987a0c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) 
MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..a2c3f071635 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go @@ -0,0 +1,53 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 
00000000000..4a23fb1cbc6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel struct { + DatabaseCount *float64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerType *ScenarioSource `json:"sourceServerType,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *ReplicateMigrationState `json:"state,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerType *ScenarioTarget `json:"targetServerType,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + 
wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..a1aade83f11 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..35019e63ace --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,130 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, 
nil +} + +var _ json.Unmarshaler = &MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list 
[]json.RawMessage: %+v", err) + } + + output := make([]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbdatabaseinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbdatabaseinput.go new file mode 100644 index 00000000000..cf0c0097026 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbdatabaseinput.go @@ -0,0 +1,13 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbDatabaseInput struct { + Id *string `json:"id,omitempty"` + MakeSourceDbReadOnly *bool `json:"makeSourceDbReadOnly,omitempty"` + Name *string `json:"name,omitempty"` + SchemaSetting *interface{} `json:"schemaSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsyncdatabaseinput.go new file mode 100644 index 00000000000..07803d6ac20 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskinput.go new file mode 100644 index 00000000000..6eacddab69a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskinput.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbSyncTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutput.go new file mode 100644 index 00000000000..ef0b076a3de --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutput.go @@ -0,0 +1,108 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncTaskOutput interface { + MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = RawMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + migrateSqlServerSqlDbSyncTaskOutput BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s.migrateSqlServerSqlDbSyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbSyncTaskOutputImpl{ + migrateSqlServerSqlDbSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..8b6197a184e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..423d4094b37 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ 
+package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, 
err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputerror.go new file mode 100644 index 00000000000..1864f633c09 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputError{} + +type MigrateSqlServerSqlDbSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..f9af4ab6789 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go @@ -0,0 +1,58 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..ec5a846f24b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: 
s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..018b2aeb02c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbsynctaskproperties.go @@ -0,0 +1,121 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateSqlServerSqlDbSyncTaskProperties{} + +type MigrateSqlServerSqlDbSyncTaskProperties struct { + Input *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlDbSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskProperties{} + +func (s MigrateSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbSyncTaskProperties{} + +func (s *MigrateSqlServerSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskinput.go new file mode 100644 index 
00000000000..ab7db2b92bc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskinput.go @@ -0,0 +1,13 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigrateSqlServerSqlDbDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutput.go new file mode 100644 index 00000000000..fd15219ef2e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutput.go @@ -0,0 +1,116 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbTaskOutput interface { + MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbTaskOutput = BaseMigrateSqlServerSqlDbTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbTaskOutput = RawMigrateSqlServerSqlDbTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlDbTaskOutputImpl struct { + migrateSqlServerSqlDbTaskOutput BaseMigrateSqlServerSqlDbTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s.migrateSqlServerSqlDbTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "MigrationDatabaseLevelValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbTaskOutputImpl{ + migrateSqlServerSqlDbTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputdatabaselevel.go new file mode 100644 index 00000000000..f682e5ae9b1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputdatabaselevel.go @@ -0,0 +1,65 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +func (s 
MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go new file mode 100644 index 00000000000..1f6a5242ea3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go @@ -0,0 +1,60 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult struct { + DataIntegrityValidationResult *DataIntegrityValidationResult `json:"dataIntegrityValidationResult,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + QueryAnalysisValidationResult *QueryAnalysisValidationResult `json:"queryAnalysisValidationResult,omitempty"` + SchemaValidationResult *SchemaComparisonValidationResult `json:"schemaValidationResult,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationDatabaseLevelValidationOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputerror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputerror.go new file mode 100644 index 00000000000..13ebf0ab390 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputerror.go @@ -0,0 +1,52 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputError{} + +type MigrateSqlServerSqlDbTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputError) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputError{} + +func (s MigrateSqlServerSqlDbTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputmigrationlevel.go new file mode 100644 index 00000000000..240d4fcb7ec --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + MigrationValidationResult *MigrationValidationResult `json:"migrationValidationResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string 
`json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputtablelevel.go new file mode 100644 index 00000000000..dd04d560bdd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputtablelevel.go @@ -0,0 +1,60 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputvalidationresult.go 
b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputvalidationresult.go new file mode 100644 index 00000000000..a4a211e9297 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskoutputvalidationresult.go @@ -0,0 +1,54 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputValidationResult struct { + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationValidationOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskproperties.go new file mode 100644 index 00000000000..bd091e1ae06 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqldbtaskproperties.go @@ -0,0 +1,130 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlDbTaskProperties{} + +type MigrateSqlServerSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlDbTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskProperties{} + +func (s MigrateSqlServerSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskProperties + wrapped := 
wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbTaskProperties{} + +func (s *MigrateSqlServerSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val 
:= range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmidatabaseinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmidatabaseinput.go new file mode 100644 index 00000000000..ed08b582df1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmidatabaseinput.go @@ -0,0 +1,12 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMIDatabaseInput struct { + BackupFilePaths *[]string `json:"backupFilePaths,omitempty"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + Id *string `json:"id,omitempty"` + Name string `json:"name"` + RestoreDatabaseName string `json:"restoreDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..92e482f0285 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskinput.go @@ -0,0 +1,14 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + NumberOfParallelDatabaseMigrations *float64 `json:"numberOfParallelDatabaseMigrations,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..1951f580d93 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskoutput.go @@ -0,0 +1,92 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskOutput interface { + MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = BaseMigrateSqlServerSqlMISyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlMISyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = RawMigrateSqlServerSqlMISyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlMISyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlMISyncTaskOutputImpl struct { + migrateSqlServerSqlMISyncTaskOutput BaseMigrateSqlServerSqlMISyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s.migrateSqlServerSqlMISyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlMISyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMISyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMISyncTaskOutputImpl: %+v", err) + } + + return 
RawMigrateSqlServerSqlMISyncTaskOutputImpl{ + migrateSqlServerSqlMISyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..36e25908abb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go @@ -0,0 +1,62 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel struct { + ActiveBackupSets *[]BackupSetInfo `json:"activeBackupSets,omitempty"` + ContainerName *string `json:"containerName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FullBackupSetInfo *BackupSetInfo `json:"fullBackupSetInfo,omitempty"` + IsFullBackupRestored *bool `json:"isFullBackupRestored,omitempty"` + LastRestoredBackupSetInfo *BackupSetInfo `json:"lastRestoredBackupSetInfo,omitempty"` + MigrationState *DatabaseMigrationState `json:"migrationState,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MigrateSqlServerSqlMISyncTaskOutput() 
BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskoutputerror.go new file mode 100644 index 00000000000..73921508bfc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskoutputerror.go @@ -0,0 +1,52 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputError{} + +type MigrateSqlServerSqlMISyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputError{} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..504ad07dac1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go @@ -0,0 +1,62 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + DatabaseErrorCount *int64 `json:"databaseErrorCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerName *string `json:"sourceServerName,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerName *string `json:"targetServerName,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = 
"MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..eba690daf4f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmisynctaskproperties.go @@ -0,0 +1,124 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlMISyncTaskProperties{} + +type MigrateSqlServerSqlMISyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskProperties{} + +func (s MigrateSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) 
+ encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMISyncTaskProperties{} + +func (s *MigrateSqlServerSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return 
fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMISyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmitaskinput.go new file mode 100644 index 00000000000..a59deb72bb3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmitaskinput.go @@ -0,0 +1,18 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMITaskInput struct { + AadDomainName *string `json:"aadDomainName,omitempty"` + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedAgentJobs *[]string `json:"selectedAgentJobs,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..0cd98305a42 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmitaskoutput.go @@ -0,0 +1,108 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// MigrateSqlServerSqlMITaskOutput is a discriminated union keyed on the JSON
// field "resultType". Concrete variants handled below: AgentJobLevelOutput,
// DatabaseLevelOutput, ErrorOutput, LoginLevelOutput and MigrationLevelOutput.
type MigrateSqlServerSqlMITaskOutput interface {
	MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl
}

var _ MigrateSqlServerSqlMITaskOutput = BaseMigrateSqlServerSqlMITaskOutputImpl{}

// BaseMigrateSqlServerSqlMITaskOutputImpl holds the fields common to every
// variant of the union.
type BaseMigrateSqlServerSqlMITaskOutputImpl struct {
	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

func (s BaseMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl {
	return s
}

var _ MigrateSqlServerSqlMITaskOutput = RawMigrateSqlServerSqlMITaskOutputImpl{}

// RawMigrateSqlServerSqlMITaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawMigrateSqlServerSqlMITaskOutputImpl struct {
	migrateSqlServerSqlMITaskOutput BaseMigrateSqlServerSqlMITaskOutputImpl
	Type                            string
	Values                          map[string]interface{}
}

func (s RawMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl {
	return s.migrateSqlServerSqlMITaskOutput
}

// UnmarshalMigrateSqlServerSqlMITaskOutputImplementation decodes input into the
// concrete variant selected by the (case-insensitive) "resultType" value.
// Unknown discriminators fall back to RawMigrateSqlServerSqlMITaskOutputImpl
// rather than failing, so new server-side variants do not break decoding.
func UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(input []byte) (MigrateSqlServerSqlMITaskOutput, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutput into map[string]interface: %+v", err)
	}

	// value stays "" when the discriminator is absent; that also routes to the
	// Raw fallback below.
	var value string
	if v, ok := temp["resultType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "AgentJobLevelOutput") {
		var out MigrateSqlServerSqlMITaskOutputAgentJobLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "DatabaseLevelOutput") {
		var out MigrateSqlServerSqlMITaskOutputDatabaseLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ErrorOutput") {
		var out MigrateSqlServerSqlMITaskOutputError
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputError: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "LoginLevelOutput") {
		var out MigrateSqlServerSqlMITaskOutputLoginLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "MigrationLevelOutput") {
		var out MigrateSqlServerSqlMITaskOutputMigrationLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err)
		}
		return out, nil
	}

	var parent BaseMigrateSqlServerSqlMITaskOutputImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMITaskOutputImpl: %+v", err)
	}

	return RawMigrateSqlServerSqlMITaskOutputImpl{
		migrateSqlServerSqlMITaskOutput: parent,
		Type:                            value,
		Values:                          temp,
	}, nil

}

package serviceresource

import (
	"encoding/json"
	"fmt"
)
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputAgentJobLevel{}

// MigrateSqlServerSqlMITaskOutputAgentJobLevel is the "AgentJobLevelOutput"
// variant of MigrateSqlServerSqlMITaskOutput (per-SQL-Agent-job progress).
type MigrateSqlServerSqlMITaskOutputAgentJobLevel struct {
	EndedOn               *string                `json:"endedOn,omitempty"`
	ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"`
	IsEnabled             *bool                  `json:"isEnabled,omitempty"`
	Message               *string                `json:"message,omitempty"`
	Name                  *string                `json:"name,omitempty"`
	StartedOn             *string                `json:"startedOn,omitempty"`
	State                 *MigrationState        `json:"state,omitempty"`

	// Fields inherited from MigrateSqlServerSqlMITaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl {
	return BaseMigrateSqlServerSqlMITaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputAgentJobLevel{}

// MarshalJSON marshals via a local wrapper type (to avoid recursing into this
// method), then forces the discriminator so callers cannot emit the wrong
// resultType.
func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlMITaskOutputAgentJobLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err)
	}

	decoded["resultType"] = "AgentJobLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err)
	}

	return encoded, nil
}
package serviceresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputDatabaseLevel{}

// MigrateSqlServerSqlMITaskOutputDatabaseLevel is the "DatabaseLevelOutput"
// variant of MigrateSqlServerSqlMITaskOutput (per-database progress).
type MigrateSqlServerSqlMITaskOutputDatabaseLevel struct {
	DatabaseName          *string                 `json:"databaseName,omitempty"`
	EndedOn               *string                 `json:"endedOn,omitempty"`
	ExceptionsAndWarnings *[]ReportableException  `json:"exceptionsAndWarnings,omitempty"`
	Message               *string                 `json:"message,omitempty"`
	SizeMB                *float64                `json:"sizeMB,omitempty"`
	Stage                 *DatabaseMigrationStage `json:"stage,omitempty"`
	StartedOn             *string                 `json:"startedOn,omitempty"`
	State                 *MigrationState         `json:"state,omitempty"`

	// Fields inherited from MigrateSqlServerSqlMITaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl {
	return BaseMigrateSqlServerSqlMITaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputDatabaseLevel{}

// MarshalJSON marshals via a local wrapper type (to avoid recursing into this
// method), then forces the "DatabaseLevelOutput" discriminator.
func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlMITaskOutputDatabaseLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err)
	}

	decoded["resultType"] = "DatabaseLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err)
	}

	return encoded, nil
}

package serviceresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputError{} + +type MigrateSqlServerSqlMITaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputError) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputError{} + +func (s MigrateSqlServerSqlMITaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmitaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmitaskoutputloginlevel.go new file mode 100644 index 00000000000..6555f0a59ef --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesqlserversqlmitaskoutputloginlevel.go @@ -0,0 +1,58 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputLoginLevel{}

// MigrateSqlServerSqlMITaskOutputLoginLevel is the "LoginLevelOutput" variant
// of MigrateSqlServerSqlMITaskOutput (per-login migration progress).
type MigrateSqlServerSqlMITaskOutputLoginLevel struct {
	EndedOn               *string                `json:"endedOn,omitempty"`
	ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"`
	LoginName             *string                `json:"loginName,omitempty"`
	Message               *string                `json:"message,omitempty"`
	Stage                 *LoginMigrationStage   `json:"stage,omitempty"`
	StartedOn             *string                `json:"startedOn,omitempty"`
	State                 *MigrationState        `json:"state,omitempty"`

	// Fields inherited from MigrateSqlServerSqlMITaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl {
	return BaseMigrateSqlServerSqlMITaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputLoginLevel{}

// MarshalJSON marshals via a local wrapper type (to avoid recursing into this
// method), then forces the "LoginLevelOutput" discriminator.
func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlMITaskOutputLoginLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err)
	}

	decoded["resultType"] = "LoginLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err)
	}

	return encoded, nil
}
package serviceresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputMigrationLevel{}

// MigrateSqlServerSqlMITaskOutputMigrationLevel is the "MigrationLevelOutput"
// variant of MigrateSqlServerSqlMITaskOutput: the whole-migration summary.
type MigrateSqlServerSqlMITaskOutputMigrationLevel struct {
	// AgentJobs/Databases/Logins map item names to status strings — the exact
	// value semantics are not established by this file.
	AgentJobs                *map[string]string                                `json:"agentJobs,omitempty"`
	Databases                *map[string]string                                `json:"databases,omitempty"`
	EndedOn                  *string                                           `json:"endedOn,omitempty"`
	ExceptionsAndWarnings    *[]ReportableException                            `json:"exceptionsAndWarnings,omitempty"`
	Logins                   *map[string]string                                `json:"logins,omitempty"`
	Message                  *string                                           `json:"message,omitempty"`
	OrphanedUsersInfo        *[]OrphanedUserInfo                               `json:"orphanedUsersInfo,omitempty"`
	ServerRoleResults        *map[string]StartMigrationScenarioServerRoleResult `json:"serverRoleResults,omitempty"`
	SourceServerBrandVersion *string                                           `json:"sourceServerBrandVersion,omitempty"`
	SourceServerVersion      *string                                           `json:"sourceServerVersion,omitempty"`
	StartedOn                *string                                           `json:"startedOn,omitempty"`
	State                    *MigrationState                                   `json:"state,omitempty"`
	Status                   *MigrationStatus                                  `json:"status,omitempty"`
	TargetServerBrandVersion *string                                           `json:"targetServerBrandVersion,omitempty"`
	TargetServerVersion      *string                                           `json:"targetServerVersion,omitempty"`

	// Fields inherited from MigrateSqlServerSqlMITaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl {
	return BaseMigrateSqlServerSqlMITaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputMigrationLevel{}

// MarshalJSON marshals via a local wrapper type (to avoid recursing into this
// method), then forces the "MigrationLevelOutput" discriminator.
func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlMITaskOutputMigrationLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err)
	}

	decoded["resultType"] = "MigrationLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err)
	}

	return encoded, nil
}

package serviceresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
var _ ProjectTaskProperties = MigrateSqlServerSqlMITaskProperties{}

// MigrateSqlServerSqlMITaskProperties is the ProjectTaskProperties variant with
// taskType "Migrate.SqlServer.AzureSqlDbMI" (discriminator forced in MarshalJSON).
type MigrateSqlServerSqlMITaskProperties struct {
	CreatedOn    *string                            `json:"createdOn,omitempty"`
	Input        *MigrateSqlServerSqlMITaskInput    `json:"input,omitempty"`
	IsCloneable  *bool                              `json:"isCloneable,omitempty"`
	Output       *[]MigrateSqlServerSqlMITaskOutput `json:"output,omitempty"`
	ParentTaskId *string                            `json:"parentTaskId,omitempty"`
	TaskId       *string                            `json:"taskId,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string    `json:"clientData,omitempty"`
	Commands   *[]CommandProperties  `json:"commands,omitempty"`
	Errors     *[]ODataError         `json:"errors,omitempty"`
	State      *TaskState            `json:"state,omitempty"`
	TaskType   TaskType              `json:"taskType"`
}

// ProjectTaskProperties projects the variant onto the shared base fields.
func (s MigrateSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlMITaskProperties{}

// MarshalJSON marshals via a local wrapper type (to avoid recursing into this
// method), then forces the taskType discriminator.
func (s MigrateSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSqlServerSqlMITaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties: %+v", err)
	}

	decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MigrateSqlServerSqlMITaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then decodes the two
// interface-typed lists ("commands" and "output") element-by-element through
// their discriminated-union unmarshalers, since encoding/json cannot
// instantiate interface values itself.
func (s *MigrateSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		CreatedOn    *string                         `json:"createdOn,omitempty"`
		Input        *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"`
		IsCloneable  *bool                           `json:"isCloneable,omitempty"`
		ParentTaskId *string                         `json:"parentTaskId,omitempty"`
		TaskId       *string                         `json:"taskId,omitempty"`
		ClientData   *map[string]string              `json:"clientData,omitempty"`
		Errors       *[]ODataError                   `json:"errors,omitempty"`
		State        *TaskState                      `json:"state,omitempty"`
		TaskType     TaskType                        `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.CreatedOn = decoded.CreatedOn
	s.Input = decoded.Input
	s.IsCloneable = decoded.IsCloneable
	s.ParentTaskId = decoded.ParentTaskId
	s.TaskId = decoded.TaskId
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]MigrateSqlServerSqlMITaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}

package serviceresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// MigrateSsisTaskInput is the input for an SSIS-package migration task.
// All fields are required (no pointers / omitempty).
type MigrateSsisTaskInput struct {
	SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"`
	SsisMigrationInfo    SsisMigrationInfo `json:"ssisMigrationInfo"`
	TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"`
}

package serviceresource

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// MigrateSsisTaskOutput is a discriminated union keyed on the JSON field
// "resultType". Concrete variants handled below: MigrationLevelOutput and
// SsisProjectLevelOutput.
type MigrateSsisTaskOutput interface {
	MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl
}

var _ MigrateSsisTaskOutput = BaseMigrateSsisTaskOutputImpl{}

// BaseMigrateSsisTaskOutputImpl holds the fields common to every variant.
type BaseMigrateSsisTaskOutputImpl struct {
	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

func (s BaseMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl {
	return s
}

var _ MigrateSsisTaskOutput = RawMigrateSsisTaskOutputImpl{}

// RawMigrateSsisTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawMigrateSsisTaskOutputImpl struct {
	migrateSsisTaskOutput BaseMigrateSsisTaskOutputImpl
	Type                  string
	Values                map[string]interface{}
}

func (s RawMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl {
	return s.migrateSsisTaskOutput
}

// UnmarshalMigrateSsisTaskOutputImplementation decodes input into the concrete
// variant selected by the (case-insensitive) "resultType" value; unknown
// discriminators fall back to RawMigrateSsisTaskOutputImpl rather than failing.
func UnmarshalMigrateSsisTaskOutputImplementation(input []byte) (MigrateSsisTaskOutput, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutput into map[string]interface: %+v", err)
	}

	var value string
	if v, ok := temp["resultType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "MigrationLevelOutput") {
		var out MigrateSsisTaskOutputMigrationLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputMigrationLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "SsisProjectLevelOutput") {
		var out MigrateSsisTaskOutputProjectLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputProjectLevel: %+v", err)
		}
		return out, nil
	}

	var parent BaseMigrateSsisTaskOutputImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseMigrateSsisTaskOutputImpl: %+v", err)
	}

	return RawMigrateSsisTaskOutputImpl{
		migrateSsisTaskOutput: parent,
		Type:                  value,
		Values:                temp,
	}, nil

}

package serviceresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputMigrationLevel{}

// MigrateSsisTaskOutputMigrationLevel is the "MigrationLevelOutput" variant of
// MigrateSsisTaskOutput: the whole-migration summary.
type MigrateSsisTaskOutputMigrationLevel struct {
	EndedOn                  *string                `json:"endedOn,omitempty"`
	ExceptionsAndWarnings    *[]ReportableException `json:"exceptionsAndWarnings,omitempty"`
	Message                  *string                `json:"message,omitempty"`
	SourceServerBrandVersion *string                `json:"sourceServerBrandVersion,omitempty"`
	SourceServerVersion      *string                `json:"sourceServerVersion,omitempty"`
	Stage                    *SsisMigrationStage    `json:"stage,omitempty"`
	StartedOn                *string                `json:"startedOn,omitempty"`
	Status                   *MigrationStatus       `json:"status,omitempty"`
	TargetServerBrandVersion *string                `json:"targetServerBrandVersion,omitempty"`
	TargetServerVersion      *string                `json:"targetServerVersion,omitempty"`

	// Fields inherited from MigrateSsisTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

func (s MigrateSsisTaskOutputMigrationLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl {
	return BaseMigrateSsisTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSsisTaskOutputMigrationLevel{}

// MarshalJSON marshals via a local wrapper type (to avoid recursing into this
// method), then forces the "MigrationLevelOutput" discriminator.
func (s MigrateSsisTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSsisTaskOutputMigrationLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputMigrationLevel: %+v", err)
	}

	decoded["resultType"] = "MigrationLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err)
	}

	return encoded, nil
}

package serviceresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputProjectLevel{}

// MigrateSsisTaskOutputProjectLevel is the "SsisProjectLevelOutput" variant of
// MigrateSsisTaskOutput (per-SSIS-project progress).
type MigrateSsisTaskOutputProjectLevel struct {
	EndedOn               *string                `json:"endedOn,omitempty"`
	ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"`
	FolderName            *string                `json:"folderName,omitempty"`
	Message               *string                `json:"message,omitempty"`
	ProjectName           *string                `json:"projectName,omitempty"`
	Stage                 *SsisMigrationStage    `json:"stage,omitempty"`
	StartedOn             *string                `json:"startedOn,omitempty"`
	State                 *MigrationState        `json:"state,omitempty"`

	// Fields inherited from MigrateSsisTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

func (s MigrateSsisTaskOutputProjectLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl {
	return BaseMigrateSsisTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSsisTaskOutputProjectLevel{}

// MarshalJSON marshals via a local wrapper type (to avoid recursing into this
// method), then forces the "SsisProjectLevelOutput" discriminator.
func (s MigrateSsisTaskOutputProjectLevel) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSsisTaskOutputProjectLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputProjectLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputProjectLevel: %+v", err)
	}

	decoded["resultType"] = "SsisProjectLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputProjectLevel: %+v", err)
	}

	return encoded, nil
}
package serviceresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ ProjectTaskProperties = MigrateSsisTaskProperties{}

// MigrateSsisTaskProperties is the ProjectTaskProperties variant with taskType
// "Migrate.Ssis" (discriminator forced in MarshalJSON).
type MigrateSsisTaskProperties struct {
	Input  *MigrateSsisTaskInput    `json:"input,omitempty"`
	Output *[]MigrateSsisTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties projects the variant onto the shared base fields.
func (s MigrateSsisTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = MigrateSsisTaskProperties{}

// MarshalJSON marshals via a local wrapper type (to avoid recursing into this
// method), then forces the taskType discriminator.
func (s MigrateSsisTaskProperties) MarshalJSON() ([]byte, error) {
	type wrapper MigrateSsisTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSsisTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSsisTaskProperties: %+v", err)
	}

	decoded["taskType"] = "Migrate.Ssis"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSsisTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MigrateSsisTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then decodes the two
// interface-typed lists ("commands" and "output") element-by-element through
// their discriminated-union unmarshalers, since encoding/json cannot
// instantiate interface values itself.
func (s *MigrateSsisTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *MigrateSsisTaskInput `json:"input,omitempty"`
		ClientData *map[string]string    `json:"clientData,omitempty"`
		Errors     *[]ODataError         `json:"errors,omitempty"`
		State      *TaskState            `json:"state,omitempty"`
		TaskType   TaskType              `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MigrateSsisTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSsisTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]MigrateSsisTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalMigrateSsisTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSsisTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}
b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesynccompletecommandinput.go new file mode 100644 index 00000000000..50e5e2d8513 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesynccompletecommandinput.go @@ -0,0 +1,27 @@ +package serviceresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSyncCompleteCommandInput struct { + CommitTimeStamp *string `json:"commitTimeStamp,omitempty"` + DatabaseName string `json:"databaseName"` +} + +func (o *MigrateSyncCompleteCommandInput) GetCommitTimeStampAsTime() (*time.Time, error) { + if o.CommitTimeStamp == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CommitTimeStamp, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateSyncCompleteCommandInput) SetCommitTimeStampAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CommitTimeStamp = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesynccompletecommandoutput.go new file mode 100644 index 00000000000..3641462f295 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesynccompletecommandoutput.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` + Id *string `json:"id,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesynccompletecommandproperties.go new file mode 100644 index 00000000000..b33fc93aa2e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migratesynccompletecommandproperties.go @@ -0,0 +1,56 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ CommandProperties = MigrateSyncCompleteCommandProperties{} + +type MigrateSyncCompleteCommandProperties struct { + CommandId *string `json:"commandId,omitempty"` + Input *MigrateSyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateSyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateSyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateSyncCompleteCommandProperties{} + +func (s MigrateSyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, 
fmt.Errorf("unmarshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.Sync.Complete.Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationeligibilityinfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationeligibilityinfo.go new file mode 100644 index 00000000000..740ce1fb64f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationeligibilityinfo.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationEligibilityInfo struct { + IsEligibleForMigration *bool `json:"isEligibleForMigration,omitempty"` + ValidationMessages *[]string `json:"validationMessages,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationreportresult.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationreportresult.go new file mode 100644 index 00000000000..b160d64635e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationreportresult.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationReportResult struct { + Id *string `json:"id,omitempty"` + ReportURL *string `json:"reportUrl,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationvalidationdatabasesummaryresult.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationvalidationdatabasesummaryresult.go new file mode 100644 index 00000000000..da7281862b8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationvalidationdatabasesummaryresult.go @@ -0,0 +1,44 @@ +package serviceresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationValidationDatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} + +func (o *MigrationValidationDatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *MigrationValidationDatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := 
input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationvalidationoptions.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationvalidationoptions.go new file mode 100644 index 00000000000..6c4dff54366 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationvalidationoptions.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationValidationOptions struct { + EnableDataIntegrityValidation *bool `json:"enableDataIntegrityValidation,omitempty"` + EnableQueryAnalysisValidation *bool `json:"enableQueryAnalysisValidation,omitempty"` + EnableSchemaValidation *bool `json:"enableSchemaValidation,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationvalidationresult.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationvalidationresult.go new file mode 100644 index 00000000000..459d57afcd4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_migrationvalidationresult.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationValidationResult struct { + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_misqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_misqlconnectioninfo.go new file mode 100644 index 00000000000..ff68cfa6218 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_misqlconnectioninfo.go @@ -0,0 +1,54 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MiSqlConnectionInfo{} + +type MiSqlConnectionInfo struct { + ManagedInstanceResourceId string `json:"managedInstanceResourceId"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MiSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MiSqlConnectionInfo{} + +func (s MiSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MiSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MiSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MiSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MiSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, 
fmt.Errorf("re-marshaling MiSqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbclusterinfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbclusterinfo.go new file mode 100644 index 00000000000..43ecbab490f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbclusterinfo.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbClusterInfo struct { + Databases []MongoDbDatabaseInfo `json:"databases"` + SupportsSharding bool `json:"supportsSharding"` + Type MongoDbClusterType `json:"type"` + Version string `json:"version"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbcollectioninfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbcollectioninfo.go new file mode 100644 index 00000000000..1ff798a5545 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbcollectioninfo.go @@ -0,0 +1,19 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + DataSize int64 `json:"dataSize"` + DatabaseName string `json:"databaseName"` + DocumentCount int64 `json:"documentCount"` + IsCapped bool `json:"isCapped"` + IsSystemCollection bool `json:"isSystemCollection"` + IsView bool `json:"isView"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + ShardKey *MongoDbShardKeyInfo `json:"shardKey,omitempty"` + SupportsSharding bool `json:"supportsSharding"` + ViewOf *string `json:"viewOf,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbcollectionprogress.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbcollectionprogress.go new file mode 100644 index 00000000000..9e3a73cf216 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbcollectionprogress.go @@ -0,0 +1,102 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbCollectionProgress{} + +type MongoDbCollectionProgress struct { + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbCollectionProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbCollectionProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbCollectionProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) 
SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbCollectionProgress{} + +func (s MongoDbCollectionProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbCollectionProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbCollectionProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbCollectionProgress: %+v", err) + } + + decoded["resultType"] = "Collection" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbCollectionProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbcollectionsettings.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbcollectionsettings.go new file mode 100644 index 00000000000..08ac1914c18 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbcollectionsettings.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionSettings struct { + CanDelete *bool `json:"canDelete,omitempty"` + ShardKey *MongoDbShardKeySetting `json:"shardKey,omitempty"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbconnectioninfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbconnectioninfo.go new file mode 100644 index 00000000000..3a521195e00 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbconnectioninfo.go @@ -0,0 +1,64 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MongoDbConnectionInfo{} + +type MongoDbConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + ConnectionString string `json:"connectionString"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + EnforceSSL *bool `json:"enforceSSL,omitempty"` + Port *int64 `json:"port,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MongoDbConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MongoDbConnectionInfo{} + +func (s MongoDbConnectionInfo) MarshalJSON() ([]byte, error) { + 
type wrapper MongoDbConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbConnectionInfo: %+v", err) + } + + decoded["type"] = "MongoDbConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbdatabaseinfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbdatabaseinfo.go new file mode 100644 index 00000000000..f1ac51cf1e8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbdatabaseinfo.go @@ -0,0 +1,14 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbDatabaseInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + Collections []MongoDbCollectionInfo `json:"collections"` + DataSize int64 `json:"dataSize"` + DocumentCount int64 `json:"documentCount"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + SupportsSharding bool `json:"supportsSharding"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbdatabaseprogress.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbdatabaseprogress.go new file mode 100644 index 00000000000..699ebc17291 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbdatabaseprogress.go @@ -0,0 +1,166 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbDatabaseProgress{} + +type MongoDbDatabaseProgress struct { + Collections *map[string]MongoDbProgress `json:"collections,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbDatabaseProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbDatabaseProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbDatabaseProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbDatabaseProgress{} + +func (s MongoDbDatabaseProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbDatabaseProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbDatabaseProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbDatabaseProgress: %+v", err) + } + + decoded["resultType"] = "Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbDatabaseProgress: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MongoDbDatabaseProgress{} + +func (s *MongoDbDatabaseProgress) UnmarshalJSON(bytes []byte) error { + var decoded struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.BytesCopied = decoded.BytesCopied + s.DocumentsCopied = decoded.DocumentsCopied + s.ElapsedTime = decoded.ElapsedTime + s.Errors = decoded.Errors + s.EventsPending = 
decoded.EventsPending + s.EventsReplayed = decoded.EventsReplayed + s.LastEventTime = decoded.LastEventTime + s.LastReplayTime = decoded.LastReplayTime + s.Name = decoded.Name + s.QualifiedName = decoded.QualifiedName + s.ResultType = decoded.ResultType + s.State = decoded.State + s.TotalBytes = decoded.TotalBytes + s.TotalDocuments = decoded.TotalDocuments + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MongoDbDatabaseProgress into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["collections"]; ok { + var dictionaryTemp map[string]json.RawMessage + if err := json.Unmarshal(v, &dictionaryTemp); err != nil { + return fmt.Errorf("unmarshaling Collections into dictionary map[string]json.RawMessage: %+v", err) + } + + output := make(map[string]MongoDbProgress) + for key, val := range dictionaryTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling key %q field 'Collections' for 'MongoDbDatabaseProgress': %+v", key, err) + } + output[key] = impl + } + s.Collections = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbdatabasesettings.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbdatabasesettings.go new file mode 100644 index 00000000000..e9015c6aa56 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbdatabasesettings.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbDatabaseSettings struct { + Collections map[string]MongoDbCollectionSettings `json:"collections"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodberror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodberror.go new file mode 100644 index 00000000000..648d454ecb1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodberror.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbError struct { + Code *string `json:"code,omitempty"` + Count *int64 `json:"count,omitempty"` + Message *string `json:"message,omitempty"` + Type *MongoDbErrorType `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbmigrationprogress.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbmigrationprogress.go new file mode 100644 index 00000000000..64184e426c2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbmigrationprogress.go @@ -0,0 +1,103 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbMigrationProgress{} + +type MongoDbMigrationProgress struct { + Databases *map[string]MongoDbDatabaseProgress `json:"databases,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbMigrationProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbMigrationProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbMigrationProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbMigrationProgress{} + +func (s MongoDbMigrationProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbMigrationProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbMigrationProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbMigrationProgress: %+v", err) + } + + decoded["resultType"] = "Migration" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbMigrationProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbmigrationsettings.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbmigrationsettings.go new file mode 100644 index 00000000000..69fa2bdd9b2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbmigrationsettings.go @@ -0,0 +1,13 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbMigrationSettings struct { + BoostRUs *int64 `json:"boostRUs,omitempty"` + Databases map[string]MongoDbDatabaseSettings `json:"databases"` + Replication *MongoDbReplication `json:"replication,omitempty"` + Source MongoDbConnectionInfo `json:"source"` + Target MongoDbConnectionInfo `json:"target"` + Throttling *MongoDbThrottlingSettings `json:"throttling,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbprogress.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbprogress.go new file mode 100644 index 00000000000..2209d4fb7ba --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbprogress.go @@ -0,0 +1,104 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbProgress interface { + MongoDbProgress() BaseMongoDbProgressImpl +} + +var _ MongoDbProgress = BaseMongoDbProgressImpl{} + +type BaseMongoDbProgressImpl struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s BaseMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s +} + +var _ MongoDbProgress = RawMongoDbProgressImpl{} + +// RawMongoDbProgressImpl is 
returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMongoDbProgressImpl struct { + mongoDbProgress BaseMongoDbProgressImpl + Type string + Values map[string]interface{} +} + +func (s RawMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s.mongoDbProgress +} + +func UnmarshalMongoDbProgressImplementation(input []byte) (MongoDbProgress, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbProgress into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Collection") { + var out MongoDbCollectionProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbCollectionProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Database") { + var out MongoDbDatabaseProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbDatabaseProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migration") { + var out MongoDbMigrationProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbMigrationProgress: %+v", err) + } + return out, nil + } + + var parent BaseMongoDbProgressImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMongoDbProgressImpl: %+v", err) + } + + return RawMongoDbProgressImpl{ + mongoDbProgress: parent, + Type: value, + Values: temp, + }, nil + +} diff --git 
a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbshardkeyfield.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbshardkeyfield.go new file mode 100644 index 00000000000..711877ce16f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbshardkeyfield.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbShardKeyField struct { + Name string `json:"name"` + Order MongoDbShardKeyOrder `json:"order"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbshardkeyinfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbshardkeyinfo.go new file mode 100644 index 00000000000..bec5b7b8d9b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbshardkeyinfo.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbShardKeyInfo struct { + Fields []MongoDbShardKeyField `json:"fields"` + IsUnique bool `json:"isUnique"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbshardkeysetting.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbshardkeysetting.go new file mode 100644 index 00000000000..f8ffde0dd9e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbshardkeysetting.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbShardKeySetting struct { + Fields []MongoDbShardKeyField `json:"fields"` + IsUnique *bool `json:"isUnique,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbthrottlingsettings.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbthrottlingsettings.go new file mode 100644 index 00000000000..fbd4539de57 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mongodbthrottlingsettings.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbThrottlingSettings struct { + MaxParallelism *int64 `json:"maxParallelism,omitempty"` + MinFreeCPU *int64 `json:"minFreeCpu,omitempty"` + MinFreeMemoryMb *int64 `json:"minFreeMemoryMb,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_mysqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_mysqlconnectioninfo.go new file mode 100644 index 00000000000..44051b03304 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_mysqlconnectioninfo.go @@ -0,0 +1,59 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = MySqlConnectionInfo{} + +type MySqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Port int64 `json:"port"` + ServerName string `json:"serverName"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MySqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MySqlConnectionInfo{} + +func (s MySqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MySqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MySqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MySqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MySqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MySqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_odataerror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_odataerror.go new file mode 100644 index 00000000000..6cee64cdc75 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_odataerror.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ODataError struct { + Code *string `json:"code,omitempty"` + Details *[]ODataError `json:"details,omitempty"` + Message *string `json:"message,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_oracleconnectioninfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_oracleconnectioninfo.go new file mode 100644 index 00000000000..df65867f05d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_oracleconnectioninfo.go @@ -0,0 +1,58 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = OracleConnectionInfo{} + +type OracleConnectionInfo struct { + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource string `json:"dataSource"` + Port *int64 `json:"port,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s OracleConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = OracleConnectionInfo{} + +func (s OracleConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper OracleConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling OracleConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling OracleConnectionInfo: %+v", err) + } + + decoded["type"] = "OracleConnectionInfo" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling OracleConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_orphaneduserinfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_orphaneduserinfo.go new file mode 100644 index 00000000000..2435c6b6c83 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_orphaneduserinfo.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type OrphanedUserInfo struct { + DatabaseName *string `json:"databaseName,omitempty"` + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_postgresqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_postgresqlconnectioninfo.go new file mode 100644 index 00000000000..bb3f39e3f7b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_postgresqlconnectioninfo.go @@ -0,0 +1,63 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = PostgreSqlConnectionInfo{} + +type PostgreSqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Port int64 `json:"port"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName string `json:"serverName"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s PostgreSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = PostgreSqlConnectionInfo{} + +func (s PostgreSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper PostgreSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling PostgreSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling PostgreSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "PostgreSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling PostgreSqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_projecttask.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_projecttask.go new file mode 100644 index 00000000000..ac713335b18 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/serviceresource/model_projecttask.go @@ -0,0 +1,56 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectTask struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties ProjectTaskProperties `json:"properties"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} + +var _ json.Unmarshaler = &ProjectTask{} + +func (s *ProjectTask) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Etag = decoded.Etag + s.Id = decoded.Id + s.Name = decoded.Name + s.SystemData = decoded.SystemData + s.Type = decoded.Type + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ProjectTask into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["properties"]; ok { + impl, err := UnmarshalProjectTaskPropertiesImplementation(v) + if err != nil { + return fmt.Errorf("unmarshaling field 'Properties' for 'ProjectTask': %+v", err) + } + s.Properties = impl + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_projecttaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_projecttaskproperties.go new file mode 100644 index 00000000000..bce3038b56d --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/serviceresource/model_projecttaskproperties.go @@ -0,0 +1,386 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectTaskProperties interface { + ProjectTaskProperties() BaseProjectTaskPropertiesImpl +} + +var _ ProjectTaskProperties = BaseProjectTaskPropertiesImpl{} + +type BaseProjectTaskPropertiesImpl struct { + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s BaseProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return s +} + +var _ ProjectTaskProperties = RawProjectTaskPropertiesImpl{} + +// RawProjectTaskPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawProjectTaskPropertiesImpl struct { + projectTaskProperties BaseProjectTaskPropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return s.projectTaskProperties +} + +var _ json.Unmarshaler = &BaseProjectTaskPropertiesImpl{} + +func (s *BaseProjectTaskPropertiesImpl) UnmarshalJSON(bytes []byte) error { + var decoded struct { + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling BaseProjectTaskPropertiesImpl into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'BaseProjectTaskPropertiesImpl': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} + +func UnmarshalProjectTaskPropertiesImplementation(input []byte) (ProjectTaskProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ProjectTaskProperties into map[string]interface: %+v", err) + } + + var value 
string + if v, ok := temp["taskType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Connect.MongoDb") { + var out ConnectToMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.MySql") { + var out ConnectToSourceMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.Oracle.Sync") { + var out ConnectToSourceOracleSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.PostgreSql.Sync") { + var out ConnectToSourcePostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.SqlServer.Sync") { + var out ConnectToSourceSqlServerSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.SqlServer") { + var out ConnectToSourceSqlServerTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureDbForMySql") { + var out ConnectToTargetAzureDbForMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, 
fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureDbForPostgreSql.Sync") { + var out ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync") { + var out ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.SqlDb") { + var out ConnectToTargetSqlDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI.Sync.LRS") { + var out ConnectToTargetSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI") { + var out ConnectToTargetSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.SqlDb.Sync") { + var out ConnectToTargetSqlSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, 
"GetTDECertificates.Sql") { + var out GetTdeCertificatesSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetTdeCertificatesSqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesMySql") { + var out GetUserTablesMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesOracle") { + var out GetUserTablesOracleTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesOracleTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesPostgreSql") { + var out GetUserTablesPostgreSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTables.AzureSqlDb.Sync") { + var out GetUserTablesSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTables.Sql") { + var out GetUserTablesSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MongoDb") { + var out MigrateMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskProperties + 
if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql.Sync") { + var out MigrateMySqlAzureDbForMySqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Oracle.AzureDbForPostgreSql.Sync") { + var out MigrateOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDb.Sync") { + var out MigrateSqlServerSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.SqlDb") { + var out MigrateSqlServerSqlDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS") { + var out MigrateSqlServerSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI") { + var out MigrateSqlServerSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Ssis") { + var out MigrateSsisTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.SqlDb.Sync") { + var out ValidateMigrationInputSqlServerSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS") { + var out ValidateMigrationInputSqlServerSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI") { + var out ValidateMigrationInputSqlServerSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Validate.MongoDb") { + var out ValidateMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Validate.Oracle.AzureDbPostgreSql.Sync") { + var out 
ValidateOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + var parent BaseProjectTaskPropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseProjectTaskPropertiesImpl: %+v", err) + } + + return RawProjectTaskPropertiesImpl{ + projectTaskProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_queryanalysisvalidationresult.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_queryanalysisvalidationresult.go new file mode 100644 index 00000000000..881b11caf97 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_queryanalysisvalidationresult.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QueryAnalysisValidationResult struct { + QueryResults *QueryExecutionResult `json:"queryResults,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_queryexecutionresult.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_queryexecutionresult.go new file mode 100644 index 00000000000..37c8bcb8a83 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_queryexecutionresult.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type QueryExecutionResult struct { + QueryText *string `json:"queryText,omitempty"` + SourceResult *ExecutionStatistics `json:"sourceResult,omitempty"` + StatementsInBatch *int64 `json:"statementsInBatch,omitempty"` + TargetResult *ExecutionStatistics `json:"targetResult,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_reportableexception.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_reportableexception.go new file mode 100644 index 00000000000..6796e828608 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_reportableexception.go @@ -0,0 +1,13 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ReportableException struct { + ActionableMessage *string `json:"actionableMessage,omitempty"` + FilePath *string `json:"filePath,omitempty"` + HResult *int64 `json:"hResult,omitempty"` + LineNumber *string `json:"lineNumber,omitempty"` + Message *string `json:"message,omitempty"` + StackTrace *string `json:"stackTrace,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_schemacomparisonvalidationresult.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_schemacomparisonvalidationresult.go new file mode 100644 index 00000000000..713b1d2631c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_schemacomparisonvalidationresult.go @@ -0,0 +1,11 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SchemaComparisonValidationResult struct { + SchemaDifferences *SchemaComparisonValidationResultType `json:"schemaDifferences,omitempty"` + SourceDatabaseObjectCount *map[string]int64 `json:"sourceDatabaseObjectCount,omitempty"` + TargetDatabaseObjectCount *map[string]int64 `json:"targetDatabaseObjectCount,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_schemacomparisonvalidationresulttype.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_schemacomparisonvalidationresulttype.go new file mode 100644 index 00000000000..24cb5a6a5a4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_schemacomparisonvalidationresulttype.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SchemaComparisonValidationResultType struct { + ObjectName *string `json:"objectName,omitempty"` + ObjectType *ObjectType `json:"objectType,omitempty"` + UpdateAction *UpdateActionType `json:"updateAction,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_selectedcertificateinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_selectedcertificateinput.go new file mode 100644 index 00000000000..4fefe71eaeb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_selectedcertificateinput.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SelectedCertificateInput struct { + CertificateName string `json:"certificateName"` + Password string `json:"password"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_serverproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_serverproperties.go new file mode 100644 index 00000000000..06cc6f435f4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_serverproperties.go @@ -0,0 +1,13 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServerProperties struct { + ServerDatabaseCount *int64 `json:"serverDatabaseCount,omitempty"` + ServerEdition *string `json:"serverEdition,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerOperatingSystemVersion *string `json:"serverOperatingSystemVersion,omitempty"` + ServerPlatform *string `json:"serverPlatform,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_servicesku.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_servicesku.go new file mode 100644 index 00000000000..b384ff13edc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_servicesku.go @@ -0,0 +1,12 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServiceSku struct { + Capacity *int64 `json:"capacity,omitempty"` + Family *string `json:"family,omitempty"` + Name *string `json:"name,omitempty"` + Size *string `json:"size,omitempty"` + Tier *string `json:"tier,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_sqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_sqlconnectioninfo.go new file mode 100644 index 00000000000..8cd897c237d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_sqlconnectioninfo.go @@ -0,0 +1,64 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = SqlConnectionInfo{} + +type SqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource string `json:"dataSource"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Platform *SqlSourcePlatform `json:"platform,omitempty"` + Port *int64 `json:"port,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s SqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = SqlConnectionInfo{} + +func (s SqlConnectionInfo) MarshalJSON() ([]byte, error) { + 
type wrapper SqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling SqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling SqlConnectionInfo: %+v", err) + } + + decoded["type"] = "SqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling SqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_sqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_sqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..e6580645769 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_sqlserversqlmisynctaskinput.go @@ -0,0 +1,13 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_ssismigrationinfo.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_ssismigrationinfo.go new file mode 100644 index 00000000000..498cced4bcc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_ssismigrationinfo.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SsisMigrationInfo struct { + EnvironmentOverwriteOption *SsisMigrationOverwriteOption `json:"environmentOverwriteOption,omitempty"` + ProjectOverwriteOption *SsisMigrationOverwriteOption `json:"projectOverwriteOption,omitempty"` + SsisStoreType *SsisStoreType `json:"ssisStoreType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_startmigrationscenarioserverroleresult.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_startmigrationscenarioserverroleresult.go new file mode 100644 index 00000000000..4313773209d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_startmigrationscenarioserverroleresult.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type StartMigrationScenarioServerRoleResult struct { + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Name *string `json:"name,omitempty"` + State *MigrationState `json:"state,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_syncmigrationdatabaseerrorevent.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_syncmigrationdatabaseerrorevent.go new file mode 100644 index 00000000000..0fbd57e83e1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_syncmigrationdatabaseerrorevent.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SyncMigrationDatabaseErrorEvent struct { + EventText *string `json:"eventText,omitempty"` + EventTypeString *string `json:"eventTypeString,omitempty"` + TimestampString *string `json:"timestampString,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..ee6350e81b3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlDbSyncTaskProperties struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..4ece32d35fb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package 
serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMISyncTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..600220316e4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmisynctaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlMISyncTaskProperties struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMISyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmitaskinput.go new file mode 100644 index 00000000000..86d13d45dfd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmitaskinput.go @@ -0,0 +1,14 @@ +package serviceresource + +// Copyright 
(c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMITaskInput struct { + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..0518188760a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmitaskoutput.go @@ -0,0 +1,15 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateMigrationInputSqlServerSqlMITaskOutput struct { + BackupFolderErrors *[]ReportableException `json:"backupFolderErrors,omitempty"` + BackupShareCredentialsErrors *[]ReportableException `json:"backupShareCredentialsErrors,omitempty"` + BackupStorageAccountErrors *[]ReportableException `json:"backupStorageAccountErrors,omitempty"` + DatabaseBackupInfo *DatabaseBackupInfo `json:"databaseBackupInfo,omitempty"` + ExistingBackupErrors *[]ReportableException `json:"existingBackupErrors,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + RestoreDatabaseNameErrors *[]ReportableException `json:"restoreDatabaseNameErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..09eae601c73 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemigrationinputsqlserversqlmitaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +type ValidateMigrationInputSqlServerSqlMITaskProperties struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var 
decoded struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemongodbtaskproperties.go new file mode 100644 index 00000000000..3401620e013 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatemongodbtaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ValidateMongoDbTaskProperties{} + +type ValidateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMongoDbTaskProperties{} + +func (s ValidateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMongoDbTaskProperties{} + +func (s *ValidateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + ClientData *map[string]string 
`json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_validateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_validateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..ad879802183 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_validateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package serviceresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ValidateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.Oracle.AzureDbPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { 
+ var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_validateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_validateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..f1ff6f6e9bb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_validateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,8 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateOracleAzureDbPostgreSqlSyncTaskOutput struct { + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_validatesyncmigrationinputsqlservertaskinput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatesyncmigrationinputsqlservertaskinput.go new file mode 100644 index 00000000000..da2f5e4704d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatesyncmigrationinputsqlservertaskinput.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateSyncMigrationInputSqlServerTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_validatesyncmigrationinputsqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatesyncmigrationinputsqlservertaskoutput.go new file mode 100644 index 00000000000..d6e6b8e2780 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_validatesyncmigrationinputsqlservertaskoutput.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateSyncMigrationInputSqlServerTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_validationerror.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_validationerror.go new file mode 100644 index 00000000000..478e5e29376 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_validationerror.go @@ -0,0 +1,9 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidationError struct { + Severity *Severity `json:"severity,omitempty"` + Text *string `json:"text,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/model_waitstatistics.go b/resource-manager/datamigration/2025-06-30/serviceresource/model_waitstatistics.go new file mode 100644 index 00000000000..2320ba49148 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/model_waitstatistics.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type WaitStatistics struct { + WaitCount *int64 `json:"waitCount,omitempty"` + WaitTimeMs *float64 `json:"waitTimeMs,omitempty"` + WaitType *string `json:"waitType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/predicates.go b/resource-manager/datamigration/2025-06-30/serviceresource/predicates.go new file mode 100644 index 00000000000..95cce1e7e97 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/predicates.go @@ -0,0 +1,83 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AvailableServiceSkuOperationPredicate struct { + ResourceType *string +} + +func (p AvailableServiceSkuOperationPredicate) Matches(input AvailableServiceSku) bool { + + if p.ResourceType != nil && (input.ResourceType == nil || *p.ResourceType != *input.ResourceType) { + return false + } + + return true +} + +type DataMigrationServiceOperationPredicate struct { + Etag *string + Id *string + Kind *string + Location *string + Name *string + Type *string +} + +func (p DataMigrationServiceOperationPredicate) Matches(input DataMigrationService) bool { + + if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Kind != nil && (input.Kind == nil || *p.Kind != *input.Kind) { + return false + } + + if p.Location != nil && (input.Location == nil || *p.Location != *input.Location) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type ProjectTaskOperationPredicate struct { + Etag *string + Id *string + Name *string + Type *string +} + +func (p ProjectTaskOperationPredicate) Matches(input ProjectTask) bool { + + if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/datamigration/2025-06-30/serviceresource/version.go b/resource-manager/datamigration/2025-06-30/serviceresource/version.go new file mode 100644 index 
00000000000..2dff1346e53 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/serviceresource/version.go @@ -0,0 +1,10 @@ +package serviceresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/serviceresource/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/README.md b/resource-manager/datamigration/2025-06-30/servicetaskresource/README.md new file mode 100644 index 00000000000..c0fa4830ab3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/README.md @@ -0,0 +1,110 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/servicetaskresource` Documentation + +The `servicetaskresource` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/servicetaskresource" +``` + + +### Client Initialization + +```go +client := servicetaskresource.NewServiceTaskResourceClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `ServiceTaskResourceClient.ServiceTasksCancel` + +```go +ctx := context.TODO() +id := servicetaskresource.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +read, err := client.ServiceTasksCancel(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ServiceTaskResourceClient.ServiceTasksCreateOrUpdate` + +```go +ctx := context.TODO() +id := servicetaskresource.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +payload := servicetaskresource.ProjectTask{ + // ... 
+} + + +read, err := client.ServiceTasksCreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ServiceTaskResourceClient.ServiceTasksDelete` + +```go +ctx := context.TODO() +id := servicetaskresource.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +read, err := client.ServiceTasksDelete(ctx, id, servicetaskresource.DefaultServiceTasksDeleteOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ServiceTaskResourceClient.ServiceTasksGet` + +```go +ctx := context.TODO() +id := servicetaskresource.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +read, err := client.ServiceTasksGet(ctx, id, servicetaskresource.DefaultServiceTasksGetOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `ServiceTaskResourceClient.ServiceTasksUpdate` + +```go +ctx := context.TODO() +id := servicetaskresource.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +payload := servicetaskresource.ProjectTask{ + // ... 
+} + + +read, err := client.ServiceTasksUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/client.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/client.go new file mode 100644 index 00000000000..d47d30e2813 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/client.go @@ -0,0 +1,26 @@ +package servicetaskresource + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServiceTaskResourceClient struct { + Client *resourcemanager.Client +} + +func NewServiceTaskResourceClientWithBaseURI(sdkApi sdkEnv.Api) (*ServiceTaskResourceClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "servicetaskresource", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating ServiceTaskResourceClient: %+v", err) + } + + return &ServiceTaskResourceClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/constants.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/constants.go new file mode 100644 index 00000000000..fba0f643cf0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/constants.go @@ -0,0 +1,2105 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AuthenticationType string + +const ( + AuthenticationTypeActiveDirectoryIntegrated AuthenticationType = "ActiveDirectoryIntegrated" + AuthenticationTypeActiveDirectoryPassword AuthenticationType = "ActiveDirectoryPassword" + AuthenticationTypeNone AuthenticationType = "None" + AuthenticationTypeSqlAuthentication AuthenticationType = "SqlAuthentication" + AuthenticationTypeWindowsAuthentication AuthenticationType = "WindowsAuthentication" +) + +func PossibleValuesForAuthenticationType() []string { + return []string{ + string(AuthenticationTypeActiveDirectoryIntegrated), + string(AuthenticationTypeActiveDirectoryPassword), + string(AuthenticationTypeNone), + string(AuthenticationTypeSqlAuthentication), + string(AuthenticationTypeWindowsAuthentication), + } +} + +func (s *AuthenticationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthenticationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthenticationType(input string) (*AuthenticationType, error) { + vals := map[string]AuthenticationType{ + "activedirectoryintegrated": AuthenticationTypeActiveDirectoryIntegrated, + "activedirectorypassword": AuthenticationTypeActiveDirectoryPassword, + "none": AuthenticationTypeNone, + "sqlauthentication": AuthenticationTypeSqlAuthentication, + "windowsauthentication": AuthenticationTypeWindowsAuthentication, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthenticationType(input) + return &out, nil +} + +type BackupFileStatus string + +const ( + BackupFileStatusArrived BackupFileStatus = "Arrived" + BackupFileStatusCancelled BackupFileStatus = "Cancelled" + BackupFileStatusQueued BackupFileStatus = "Queued" + BackupFileStatusRestored 
BackupFileStatus = "Restored" + BackupFileStatusRestoring BackupFileStatus = "Restoring" + BackupFileStatusUploaded BackupFileStatus = "Uploaded" + BackupFileStatusUploading BackupFileStatus = "Uploading" +) + +func PossibleValuesForBackupFileStatus() []string { + return []string{ + string(BackupFileStatusArrived), + string(BackupFileStatusCancelled), + string(BackupFileStatusQueued), + string(BackupFileStatusRestored), + string(BackupFileStatusRestoring), + string(BackupFileStatusUploaded), + string(BackupFileStatusUploading), + } +} + +func (s *BackupFileStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupFileStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupFileStatus(input string) (*BackupFileStatus, error) { + vals := map[string]BackupFileStatus{ + "arrived": BackupFileStatusArrived, + "cancelled": BackupFileStatusCancelled, + "queued": BackupFileStatusQueued, + "restored": BackupFileStatusRestored, + "restoring": BackupFileStatusRestoring, + "uploaded": BackupFileStatusUploaded, + "uploading": BackupFileStatusUploading, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupFileStatus(input) + return &out, nil +} + +type BackupMode string + +const ( + BackupModeCreateBackup BackupMode = "CreateBackup" + BackupModeExistingBackup BackupMode = "ExistingBackup" +) + +func PossibleValuesForBackupMode() []string { + return []string{ + string(BackupModeCreateBackup), + string(BackupModeExistingBackup), + } +} + +func (s *BackupMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupMode(decoded) + 
if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupMode(input string) (*BackupMode, error) { + vals := map[string]BackupMode{ + "createbackup": BackupModeCreateBackup, + "existingbackup": BackupModeExistingBackup, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupMode(input) + return &out, nil +} + +type BackupType string + +const ( + BackupTypeDatabase BackupType = "Database" + BackupTypeDifferentialDatabase BackupType = "DifferentialDatabase" + BackupTypeDifferentialFile BackupType = "DifferentialFile" + BackupTypeDifferentialPartial BackupType = "DifferentialPartial" + BackupTypeFile BackupType = "File" + BackupTypePartial BackupType = "Partial" + BackupTypeTransactionLog BackupType = "TransactionLog" +) + +func PossibleValuesForBackupType() []string { + return []string{ + string(BackupTypeDatabase), + string(BackupTypeDifferentialDatabase), + string(BackupTypeDifferentialFile), + string(BackupTypeDifferentialPartial), + string(BackupTypeFile), + string(BackupTypePartial), + string(BackupTypeTransactionLog), + } +} + +func (s *BackupType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupType(input string) (*BackupType, error) { + vals := map[string]BackupType{ + "database": BackupTypeDatabase, + "differentialdatabase": BackupTypeDifferentialDatabase, + "differentialfile": BackupTypeDifferentialFile, + "differentialpartial": BackupTypeDifferentialPartial, + "file": BackupTypeFile, + "partial": BackupTypePartial, + "transactionlog": BackupTypeTransactionLog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return 
&v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupType(input) + return &out, nil +} + +type CommandState string + +const ( + CommandStateAccepted CommandState = "Accepted" + CommandStateFailed CommandState = "Failed" + CommandStateRunning CommandState = "Running" + CommandStateSucceeded CommandState = "Succeeded" + CommandStateUnknown CommandState = "Unknown" +) + +func PossibleValuesForCommandState() []string { + return []string{ + string(CommandStateAccepted), + string(CommandStateFailed), + string(CommandStateRunning), + string(CommandStateSucceeded), + string(CommandStateUnknown), + } +} + +func (s *CommandState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCommandState(input string) (*CommandState, error) { + vals := map[string]CommandState{ + "accepted": CommandStateAccepted, + "failed": CommandStateFailed, + "running": CommandStateRunning, + "succeeded": CommandStateSucceeded, + "unknown": CommandStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CommandState(input) + return &out, nil +} + +type CommandType string + +const ( + CommandTypeCancel CommandType = "cancel" + CommandTypeFinish CommandType = "finish" + CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete CommandType = "Migrate.SqlServer.AzureDbSqlMi.Complete" + CommandTypeMigratePointSyncPointCompletePointDatabase CommandType = "Migrate.Sync.Complete.Database" + CommandTypeRestart CommandType = "restart" +) + +func PossibleValuesForCommandType() []string { + return []string{ + string(CommandTypeCancel), + string(CommandTypeFinish), + 
string(CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete), + string(CommandTypeMigratePointSyncPointCompletePointDatabase), + string(CommandTypeRestart), + } +} + +func (s *CommandType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCommandType(input string) (*CommandType, error) { + vals := map[string]CommandType{ + "cancel": CommandTypeCancel, + "finish": CommandTypeFinish, + "migrate.sqlserver.azuredbsqlmi.complete": CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete, + "migrate.sync.complete.database": CommandTypeMigratePointSyncPointCompletePointDatabase, + "restart": CommandTypeRestart, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CommandType(input) + return &out, nil +} + +type DatabaseCompatLevel string + +const ( + DatabaseCompatLevelCompatLevelEightZero DatabaseCompatLevel = "CompatLevel80" + DatabaseCompatLevelCompatLevelNineZero DatabaseCompatLevel = "CompatLevel90" + DatabaseCompatLevelCompatLevelOneFourZero DatabaseCompatLevel = "CompatLevel140" + DatabaseCompatLevelCompatLevelOneHundred DatabaseCompatLevel = "CompatLevel100" + DatabaseCompatLevelCompatLevelOneOneZero DatabaseCompatLevel = "CompatLevel110" + DatabaseCompatLevelCompatLevelOneThreeZero DatabaseCompatLevel = "CompatLevel130" + DatabaseCompatLevelCompatLevelOneTwoZero DatabaseCompatLevel = "CompatLevel120" +) + +func PossibleValuesForDatabaseCompatLevel() []string { + return []string{ + string(DatabaseCompatLevelCompatLevelEightZero), + string(DatabaseCompatLevelCompatLevelNineZero), + string(DatabaseCompatLevelCompatLevelOneFourZero), + string(DatabaseCompatLevelCompatLevelOneHundred), + 
string(DatabaseCompatLevelCompatLevelOneOneZero), + string(DatabaseCompatLevelCompatLevelOneThreeZero), + string(DatabaseCompatLevelCompatLevelOneTwoZero), + } +} + +func (s *DatabaseCompatLevel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseCompatLevel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseCompatLevel(input string) (*DatabaseCompatLevel, error) { + vals := map[string]DatabaseCompatLevel{ + "compatlevel80": DatabaseCompatLevelCompatLevelEightZero, + "compatlevel90": DatabaseCompatLevelCompatLevelNineZero, + "compatlevel140": DatabaseCompatLevelCompatLevelOneFourZero, + "compatlevel100": DatabaseCompatLevelCompatLevelOneHundred, + "compatlevel110": DatabaseCompatLevelCompatLevelOneOneZero, + "compatlevel130": DatabaseCompatLevelCompatLevelOneThreeZero, + "compatlevel120": DatabaseCompatLevelCompatLevelOneTwoZero, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseCompatLevel(input) + return &out, nil +} + +type DatabaseFileType string + +const ( + DatabaseFileTypeFilestream DatabaseFileType = "Filestream" + DatabaseFileTypeFulltext DatabaseFileType = "Fulltext" + DatabaseFileTypeLog DatabaseFileType = "Log" + DatabaseFileTypeNotSupported DatabaseFileType = "NotSupported" + DatabaseFileTypeRows DatabaseFileType = "Rows" +) + +func PossibleValuesForDatabaseFileType() []string { + return []string{ + string(DatabaseFileTypeFilestream), + string(DatabaseFileTypeFulltext), + string(DatabaseFileTypeLog), + string(DatabaseFileTypeNotSupported), + string(DatabaseFileTypeRows), + } +} + +func (s *DatabaseFileType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + 
return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseFileType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseFileType(input string) (*DatabaseFileType, error) { + vals := map[string]DatabaseFileType{ + "filestream": DatabaseFileTypeFilestream, + "fulltext": DatabaseFileTypeFulltext, + "log": DatabaseFileTypeLog, + "notsupported": DatabaseFileTypeNotSupported, + "rows": DatabaseFileTypeRows, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseFileType(input) + return &out, nil +} + +type DatabaseMigrationStage string + +const ( + DatabaseMigrationStageBackup DatabaseMigrationStage = "Backup" + DatabaseMigrationStageCompleted DatabaseMigrationStage = "Completed" + DatabaseMigrationStageFileCopy DatabaseMigrationStage = "FileCopy" + DatabaseMigrationStageInitialize DatabaseMigrationStage = "Initialize" + DatabaseMigrationStageNone DatabaseMigrationStage = "None" + DatabaseMigrationStageRestore DatabaseMigrationStage = "Restore" +) + +func PossibleValuesForDatabaseMigrationStage() []string { + return []string{ + string(DatabaseMigrationStageBackup), + string(DatabaseMigrationStageCompleted), + string(DatabaseMigrationStageFileCopy), + string(DatabaseMigrationStageInitialize), + string(DatabaseMigrationStageNone), + string(DatabaseMigrationStageRestore), + } +} + +func (s *DatabaseMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationStage(input string) (*DatabaseMigrationStage, error) { + vals := map[string]DatabaseMigrationStage{ + "backup": 
DatabaseMigrationStageBackup, + "completed": DatabaseMigrationStageCompleted, + "filecopy": DatabaseMigrationStageFileCopy, + "initialize": DatabaseMigrationStageInitialize, + "none": DatabaseMigrationStageNone, + "restore": DatabaseMigrationStageRestore, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseMigrationStage(input) + return &out, nil +} + +type DatabaseMigrationState string + +const ( + DatabaseMigrationStateCANCELLED DatabaseMigrationState = "CANCELLED" + DatabaseMigrationStateCOMPLETED DatabaseMigrationState = "COMPLETED" + DatabaseMigrationStateCUTOVERSTART DatabaseMigrationState = "CUTOVER_START" + DatabaseMigrationStateFAILED DatabaseMigrationState = "FAILED" + DatabaseMigrationStateFULLBACKUPUPLOADSTART DatabaseMigrationState = "FULL_BACKUP_UPLOAD_START" + DatabaseMigrationStateINITIAL DatabaseMigrationState = "INITIAL" + DatabaseMigrationStateLOGSHIPPINGSTART DatabaseMigrationState = "LOG_SHIPPING_START" + DatabaseMigrationStatePOSTCUTOVERCOMPLETE DatabaseMigrationState = "POST_CUTOVER_COMPLETE" + DatabaseMigrationStateUNDEFINED DatabaseMigrationState = "UNDEFINED" + DatabaseMigrationStateUPLOADLOGFILESSTART DatabaseMigrationState = "UPLOAD_LOG_FILES_START" +) + +func PossibleValuesForDatabaseMigrationState() []string { + return []string{ + string(DatabaseMigrationStateCANCELLED), + string(DatabaseMigrationStateCOMPLETED), + string(DatabaseMigrationStateCUTOVERSTART), + string(DatabaseMigrationStateFAILED), + string(DatabaseMigrationStateFULLBACKUPUPLOADSTART), + string(DatabaseMigrationStateINITIAL), + string(DatabaseMigrationStateLOGSHIPPINGSTART), + string(DatabaseMigrationStatePOSTCUTOVERCOMPLETE), + string(DatabaseMigrationStateUNDEFINED), + string(DatabaseMigrationStateUPLOADLOGFILESSTART), + } +} + +func (s *DatabaseMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); 
err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationState(input string) (*DatabaseMigrationState, error) { + vals := map[string]DatabaseMigrationState{ + "cancelled": DatabaseMigrationStateCANCELLED, + "completed": DatabaseMigrationStateCOMPLETED, + "cutover_start": DatabaseMigrationStateCUTOVERSTART, + "failed": DatabaseMigrationStateFAILED, + "full_backup_upload_start": DatabaseMigrationStateFULLBACKUPUPLOADSTART, + "initial": DatabaseMigrationStateINITIAL, + "log_shipping_start": DatabaseMigrationStateLOGSHIPPINGSTART, + "post_cutover_complete": DatabaseMigrationStatePOSTCUTOVERCOMPLETE, + "undefined": DatabaseMigrationStateUNDEFINED, + "upload_log_files_start": DatabaseMigrationStateUPLOADLOGFILESSTART, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseMigrationState(input) + return &out, nil +} + +type DatabaseState string + +const ( + DatabaseStateCopying DatabaseState = "Copying" + DatabaseStateEmergency DatabaseState = "Emergency" + DatabaseStateOffline DatabaseState = "Offline" + DatabaseStateOfflineSecondary DatabaseState = "OfflineSecondary" + DatabaseStateOnline DatabaseState = "Online" + DatabaseStateRecovering DatabaseState = "Recovering" + DatabaseStateRecoveryPending DatabaseState = "RecoveryPending" + DatabaseStateRestoring DatabaseState = "Restoring" + DatabaseStateSuspect DatabaseState = "Suspect" +) + +func PossibleValuesForDatabaseState() []string { + return []string{ + string(DatabaseStateCopying), + string(DatabaseStateEmergency), + string(DatabaseStateOffline), + string(DatabaseStateOfflineSecondary), + string(DatabaseStateOnline), + string(DatabaseStateRecovering), + string(DatabaseStateRecoveryPending), + string(DatabaseStateRestoring), + 
		string(DatabaseStateSuspect),
	}
}

// UnmarshalJSON implements json.Unmarshaler for DatabaseState. Known values
// are matched case-insensitively; unknown values are preserved as-is.
func (s *DatabaseState) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseDatabaseState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseDatabaseState maps input onto a known DatabaseState value, ignoring
// case. Unrecognised input is returned unchanged on a best-effort basis
// rather than rejected, so the returned error is always nil (the error
// result is kept for signature consistency with the other parse helpers).
func parseDatabaseState(input string) (*DatabaseState, error) {
	vals := map[string]DatabaseState{
		"copying":          DatabaseStateCopying,
		"emergency":        DatabaseStateEmergency,
		"offline":          DatabaseStateOffline,
		"offlinesecondary": DatabaseStateOfflineSecondary,
		"online":           DatabaseStateOnline,
		"recovering":       DatabaseStateRecovering,
		"recoverypending":  DatabaseStateRecoveryPending,
		"restoring":        DatabaseStateRestoring,
		"suspect":          DatabaseStateSuspect,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := DatabaseState(input)
	return &out, nil
}

// LoginMigrationStage is a string-backed enum; known values are listed below.
type LoginMigrationStage string

const (
	LoginMigrationStageAssignRoleMembership       LoginMigrationStage = "AssignRoleMembership"
	LoginMigrationStageAssignRoleOwnership        LoginMigrationStage = "AssignRoleOwnership"
	LoginMigrationStageCompleted                  LoginMigrationStage = "Completed"
	LoginMigrationStageEstablishObjectPermissions LoginMigrationStage = "EstablishObjectPermissions"
	LoginMigrationStageEstablishServerPermissions LoginMigrationStage = "EstablishServerPermissions"
	LoginMigrationStageEstablishUserMapping       LoginMigrationStage = "EstablishUserMapping"
	LoginMigrationStageInitialize                 LoginMigrationStage = "Initialize"
	LoginMigrationStageLoginMigration             LoginMigrationStage = "LoginMigration"
	LoginMigrationStageNone                       LoginMigrationStage = "None"
)

// PossibleValuesForLoginMigrationStage returns the known values as strings.
func PossibleValuesForLoginMigrationStage() []string {
	return []string{
		string(LoginMigrationStageAssignRoleMembership),
		string(LoginMigrationStageAssignRoleOwnership),
		string(LoginMigrationStageCompleted),
		string(LoginMigrationStageEstablishObjectPermissions),
		string(LoginMigrationStageEstablishServerPermissions),
		string(LoginMigrationStageEstablishUserMapping),
		string(LoginMigrationStageInitialize),
		string(LoginMigrationStageLoginMigration),
		string(LoginMigrationStageNone),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *LoginMigrationStage) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseLoginMigrationStage(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseLoginMigrationStage matches input case-insensitively; unknown values
// pass through unchanged and the returned error is always nil.
func parseLoginMigrationStage(input string) (*LoginMigrationStage, error) {
	vals := map[string]LoginMigrationStage{
		"assignrolemembership":       LoginMigrationStageAssignRoleMembership,
		"assignroleownership":        LoginMigrationStageAssignRoleOwnership,
		"completed":                  LoginMigrationStageCompleted,
		"establishobjectpermissions": LoginMigrationStageEstablishObjectPermissions,
		"establishserverpermissions": LoginMigrationStageEstablishServerPermissions,
		"establishusermapping":       LoginMigrationStageEstablishUserMapping,
		"initialize":                 LoginMigrationStageInitialize,
		"loginmigration":             LoginMigrationStageLoginMigration,
		"none":                       LoginMigrationStageNone,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := LoginMigrationStage(input)
	return &out, nil
}

// LoginType is a string-backed enum; known values are listed below.
type LoginType string

const (
	LoginTypeAsymmetricKey LoginType = "AsymmetricKey"
	LoginTypeCertificate   LoginType = "Certificate"
	LoginTypeExternalGroup LoginType = "ExternalGroup"
	LoginTypeExternalUser  LoginType = "ExternalUser"
	LoginTypeSqlLogin      LoginType = "SqlLogin"
	LoginTypeWindowsGroup  LoginType = "WindowsGroup"
	LoginTypeWindowsUser   LoginType = "WindowsUser"
)

// PossibleValuesForLoginType returns the known values as strings.
func PossibleValuesForLoginType() []string {
	return []string{
		string(LoginTypeAsymmetricKey),
		string(LoginTypeCertificate),
		string(LoginTypeExternalGroup),
		string(LoginTypeExternalUser),
		string(LoginTypeSqlLogin),
		string(LoginTypeWindowsGroup),
		string(LoginTypeWindowsUser),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *LoginType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseLoginType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseLoginType matches input case-insensitively; unknown values pass
// through unchanged and the returned error is always nil.
func parseLoginType(input string) (*LoginType, error) {
	vals := map[string]LoginType{
		"asymmetrickey": LoginTypeAsymmetricKey,
		"certificate":   LoginTypeCertificate,
		"externalgroup": LoginTypeExternalGroup,
		"externaluser":  LoginTypeExternalUser,
		"sqllogin":      LoginTypeSqlLogin,
		"windowsgroup":  LoginTypeWindowsGroup,
		"windowsuser":   LoginTypeWindowsUser,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := LoginType(input)
	return &out, nil
}

// MigrationState is a string-backed enum; known values are listed below.
type MigrationState string

const (
	MigrationStateCompleted  MigrationState = "Completed"
	MigrationStateFailed     MigrationState = "Failed"
	MigrationStateInProgress MigrationState = "InProgress"
	MigrationStateNone       MigrationState = "None"
	MigrationStateSkipped    MigrationState = "Skipped"
	MigrationStateStopped    MigrationState = "Stopped"
	MigrationStateWarning    MigrationState = "Warning"
)

// PossibleValuesForMigrationState returns the known values as strings.
func PossibleValuesForMigrationState() []string {
	return []string{
		string(MigrationStateCompleted),
		string(MigrationStateFailed),
		string(MigrationStateInProgress),
		string(MigrationStateNone),
		string(MigrationStateSkipped),
		string(MigrationStateStopped),
		string(MigrationStateWarning),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *MigrationState) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMigrationState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMigrationState matches input case-insensitively; unknown values pass
// through unchanged and the returned error is always nil.
func parseMigrationState(input string) (*MigrationState, error) {
	vals := map[string]MigrationState{
		"completed":  MigrationStateCompleted,
		"failed":     MigrationStateFailed,
		"inprogress": MigrationStateInProgress,
		"none":       MigrationStateNone,
		"skipped":    MigrationStateSkipped,
		"stopped":    MigrationStateStopped,
		"warning":    MigrationStateWarning,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MigrationState(input)
	return &out, nil
}

// MigrationStatus is a string-backed enum; known values are listed below.
type MigrationStatus string

const (
	MigrationStatusCompleted               MigrationStatus = "Completed"
	MigrationStatusCompletedWithWarnings   MigrationStatus = "CompletedWithWarnings"
	MigrationStatusConfigured              MigrationStatus = "Configured"
	MigrationStatusConnecting              MigrationStatus = "Connecting"
	MigrationStatusDefault                 MigrationStatus = "Default"
	MigrationStatusError                   MigrationStatus = "Error"
	MigrationStatusRunning                 MigrationStatus = "Running"
	MigrationStatusSelectLogins            MigrationStatus = "SelectLogins"
	MigrationStatusSourceAndTargetSelected MigrationStatus = "SourceAndTargetSelected"
	MigrationStatusStopped                 MigrationStatus = "Stopped"
)

// PossibleValuesForMigrationStatus returns the known values as strings.
func PossibleValuesForMigrationStatus() []string {
	return []string{
		string(MigrationStatusCompleted),
		string(MigrationStatusCompletedWithWarnings),
		string(MigrationStatusConfigured),
		string(MigrationStatusConnecting),
		string(MigrationStatusDefault),
		string(MigrationStatusError),
		string(MigrationStatusRunning),
		string(MigrationStatusSelectLogins),
		string(MigrationStatusSourceAndTargetSelected),
		string(MigrationStatusStopped),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *MigrationStatus) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMigrationStatus(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMigrationStatus matches input case-insensitively; unknown values pass
// through unchanged and the returned error is always nil.
func parseMigrationStatus(input string) (*MigrationStatus, error) {
	vals := map[string]MigrationStatus{
		"completed":               MigrationStatusCompleted,
		"completedwithwarnings":   MigrationStatusCompletedWithWarnings,
		"configured":              MigrationStatusConfigured,
		"connecting":              MigrationStatusConnecting,
		"default":                 MigrationStatusDefault,
		"error":                   MigrationStatusError,
		"running":                 MigrationStatusRunning,
		"selectlogins":            MigrationStatusSelectLogins,
		"sourceandtargetselected": MigrationStatusSourceAndTargetSelected,
		"stopped":                 MigrationStatusStopped,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MigrationStatus(input)
	return &out, nil
}

// MongoDbClusterType is a string-backed enum; known values are listed below.
type MongoDbClusterType string

const (
	MongoDbClusterTypeBlobContainer MongoDbClusterType = "BlobContainer"
	MongoDbClusterTypeCosmosDb      MongoDbClusterType = "CosmosDb"
	MongoDbClusterTypeMongoDb       MongoDbClusterType = "MongoDb"
)

// PossibleValuesForMongoDbClusterType returns the known values as strings.
func PossibleValuesForMongoDbClusterType() []string {
	return []string{
		string(MongoDbClusterTypeBlobContainer),
		string(MongoDbClusterTypeCosmosDb),
		string(MongoDbClusterTypeMongoDb),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *MongoDbClusterType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMongoDbClusterType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMongoDbClusterType matches input case-insensitively; unknown values
// pass through unchanged and the returned error is always nil.
func parseMongoDbClusterType(input string) (*MongoDbClusterType, error) {
	vals := map[string]MongoDbClusterType{
		"blobcontainer": MongoDbClusterTypeBlobContainer,
		"cosmosdb":      MongoDbClusterTypeCosmosDb,
		"mongodb":       MongoDbClusterTypeMongoDb,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MongoDbClusterType(input)
	return &out, nil
}

// MongoDbErrorType is a string-backed enum; known values are listed below.
type MongoDbErrorType string

const (
	MongoDbErrorTypeError           MongoDbErrorType = "Error"
	MongoDbErrorTypeValidationError MongoDbErrorType = "ValidationError"
	MongoDbErrorTypeWarning         MongoDbErrorType = "Warning"
)

// PossibleValuesForMongoDbErrorType returns the known values as strings.
func PossibleValuesForMongoDbErrorType() []string {
	return []string{
		string(MongoDbErrorTypeError),
		string(MongoDbErrorTypeValidationError),
		string(MongoDbErrorTypeWarning),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *MongoDbErrorType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMongoDbErrorType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMongoDbErrorType matches input case-insensitively; unknown values
// pass through unchanged and the returned error is always nil.
func parseMongoDbErrorType(input string) (*MongoDbErrorType, error) {
	vals := map[string]MongoDbErrorType{
		"error":           MongoDbErrorTypeError,
		"validationerror": MongoDbErrorTypeValidationError,
		"warning":         MongoDbErrorTypeWarning,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MongoDbErrorType(input)
	return &out, nil
}

// MongoDbMigrationState is a string-backed enum; known values are listed below.
type MongoDbMigrationState string

const (
	MongoDbMigrationStateCanceled        MongoDbMigrationState = "Canceled"
	MongoDbMigrationStateComplete        MongoDbMigrationState = "Complete"
	MongoDbMigrationStateCopying         MongoDbMigrationState = "Copying"
	MongoDbMigrationStateFailed          MongoDbMigrationState = "Failed"
	MongoDbMigrationStateFinalizing      MongoDbMigrationState = "Finalizing"
	MongoDbMigrationStateInitialReplay   MongoDbMigrationState = "InitialReplay"
	MongoDbMigrationStateInitializing    MongoDbMigrationState = "Initializing"
	MongoDbMigrationStateNotStarted      MongoDbMigrationState = "NotStarted"
	MongoDbMigrationStateReplaying       MongoDbMigrationState = "Replaying"
	MongoDbMigrationStateRestarting      MongoDbMigrationState = "Restarting"
	MongoDbMigrationStateValidatingInput MongoDbMigrationState = "ValidatingInput"
)

// PossibleValuesForMongoDbMigrationState returns the known values as strings.
func PossibleValuesForMongoDbMigrationState() []string {
	return []string{
		string(MongoDbMigrationStateCanceled),
		string(MongoDbMigrationStateComplete),
		string(MongoDbMigrationStateCopying),
		string(MongoDbMigrationStateFailed),
		string(MongoDbMigrationStateFinalizing),
		string(MongoDbMigrationStateInitialReplay),
		string(MongoDbMigrationStateInitializing),
		string(MongoDbMigrationStateNotStarted),
		string(MongoDbMigrationStateReplaying),
		string(MongoDbMigrationStateRestarting),
		string(MongoDbMigrationStateValidatingInput),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *MongoDbMigrationState) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMongoDbMigrationState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMongoDbMigrationState matches input case-insensitively; unknown
// values pass through unchanged and the returned error is always nil.
func parseMongoDbMigrationState(input string) (*MongoDbMigrationState, error) {
	vals := map[string]MongoDbMigrationState{
		"canceled":        MongoDbMigrationStateCanceled,
		"complete":        MongoDbMigrationStateComplete,
		"copying":         MongoDbMigrationStateCopying,
		"failed":          MongoDbMigrationStateFailed,
		"finalizing":      MongoDbMigrationStateFinalizing,
		"initialreplay":   MongoDbMigrationStateInitialReplay,
		"initializing":    MongoDbMigrationStateInitializing,
		"notstarted":      MongoDbMigrationStateNotStarted,
		"replaying":       MongoDbMigrationStateReplaying,
		"restarting":      MongoDbMigrationStateRestarting,
		"validatinginput": MongoDbMigrationStateValidatingInput,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MongoDbMigrationState(input)
	return &out, nil
}
// MongoDbReplication is a string-backed enum; known values are listed below.
type MongoDbReplication string

const (
	MongoDbReplicationContinuous MongoDbReplication = "Continuous"
	MongoDbReplicationDisabled   MongoDbReplication = "Disabled"
	MongoDbReplicationOneTime    MongoDbReplication = "OneTime"
)

// PossibleValuesForMongoDbReplication returns the known values as strings.
func PossibleValuesForMongoDbReplication() []string {
	return []string{
		string(MongoDbReplicationContinuous),
		string(MongoDbReplicationDisabled),
		string(MongoDbReplicationOneTime),
	}
}

// UnmarshalJSON implements json.Unmarshaler. Known values are matched
// case-insensitively; unknown values are preserved as-is (best-effort).
func (s *MongoDbReplication) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMongoDbReplication(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMongoDbReplication matches input case-insensitively; unknown values
// pass through unchanged, so the returned error is always nil (the error
// result is kept for signature consistency with the other parse helpers).
func parseMongoDbReplication(input string) (*MongoDbReplication, error) {
	vals := map[string]MongoDbReplication{
		"continuous": MongoDbReplicationContinuous,
		"disabled":   MongoDbReplicationDisabled,
		"onetime":    MongoDbReplicationOneTime,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MongoDbReplication(input)
	return &out, nil
}

// MongoDbShardKeyOrder is a string-backed enum; known values are listed below.
type MongoDbShardKeyOrder string

const (
	MongoDbShardKeyOrderForward MongoDbShardKeyOrder = "Forward"
	MongoDbShardKeyOrderHashed  MongoDbShardKeyOrder = "Hashed"
	MongoDbShardKeyOrderReverse MongoDbShardKeyOrder = "Reverse"
)

// PossibleValuesForMongoDbShardKeyOrder returns the known values as strings.
func PossibleValuesForMongoDbShardKeyOrder() []string {
	return []string{
		string(MongoDbShardKeyOrderForward),
		string(MongoDbShardKeyOrderHashed),
		string(MongoDbShardKeyOrderReverse),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *MongoDbShardKeyOrder) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMongoDbShardKeyOrder(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMongoDbShardKeyOrder matches input case-insensitively; unknown
// values pass through unchanged and the returned error is always nil.
func parseMongoDbShardKeyOrder(input string) (*MongoDbShardKeyOrder, error) {
	vals := map[string]MongoDbShardKeyOrder{
		"forward": MongoDbShardKeyOrderForward,
		"hashed":  MongoDbShardKeyOrderHashed,
		"reverse": MongoDbShardKeyOrderReverse,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MongoDbShardKeyOrder(input)
	return &out, nil
}

// MySqlTargetPlatformType is a string-backed enum; known values are listed below.
type MySqlTargetPlatformType string

const (
	MySqlTargetPlatformTypeAzureDbForMySQL MySqlTargetPlatformType = "AzureDbForMySQL"
	MySqlTargetPlatformTypeSqlServer       MySqlTargetPlatformType = "SqlServer"
)

// PossibleValuesForMySqlTargetPlatformType returns the known values as strings.
func PossibleValuesForMySqlTargetPlatformType() []string {
	return []string{
		string(MySqlTargetPlatformTypeAzureDbForMySQL),
		string(MySqlTargetPlatformTypeSqlServer),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *MySqlTargetPlatformType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseMySqlTargetPlatformType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseMySqlTargetPlatformType matches input case-insensitively; unknown
// values pass through unchanged and the returned error is always nil.
func parseMySqlTargetPlatformType(input string) (*MySqlTargetPlatformType, error) {
	vals := map[string]MySqlTargetPlatformType{
		"azuredbformysql": MySqlTargetPlatformTypeAzureDbForMySQL,
		"sqlserver":       MySqlTargetPlatformTypeSqlServer,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := MySqlTargetPlatformType(input)
	return &out, nil
}

// ObjectType is a string-backed enum; known values are listed below.
type ObjectType string

const (
	ObjectTypeFunction         ObjectType = "Function"
	ObjectTypeStoredProcedures ObjectType = "StoredProcedures"
	ObjectTypeTable            ObjectType = "Table"
	ObjectTypeUser             ObjectType = "User"
	ObjectTypeView             ObjectType = "View"
)

// PossibleValuesForObjectType returns the known values as strings.
func PossibleValuesForObjectType() []string {
	return []string{
		string(ObjectTypeFunction),
		string(ObjectTypeStoredProcedures),
		string(ObjectTypeTable),
		string(ObjectTypeUser),
		string(ObjectTypeView),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *ObjectType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseObjectType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseObjectType matches input case-insensitively; unknown values pass
// through unchanged and the returned error is always nil.
func parseObjectType(input string) (*ObjectType, error) {
	vals := map[string]ObjectType{
		"function":         ObjectTypeFunction,
		"storedprocedures": ObjectTypeStoredProcedures,
		"table":            ObjectTypeTable,
		"user":             ObjectTypeUser,
		"view":             ObjectTypeView,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := ObjectType(input)
	return &out, nil
}

// ReplicateMigrationState is a string-backed enum; the wire values are
// upper-case with underscores, hence the all-caps constant suffixes.
type ReplicateMigrationState string

const (
	ReplicateMigrationStateACTIONREQUIRED ReplicateMigrationState = "ACTION_REQUIRED"
	ReplicateMigrationStateCOMPLETE       ReplicateMigrationState = "COMPLETE"
	ReplicateMigrationStateFAILED         ReplicateMigrationState = "FAILED"
	ReplicateMigrationStatePENDING        ReplicateMigrationState = "PENDING"
	ReplicateMigrationStateUNDEFINED      ReplicateMigrationState = "UNDEFINED"
	ReplicateMigrationStateVALIDATING     ReplicateMigrationState = "VALIDATING"
)

// PossibleValuesForReplicateMigrationState returns the known values as strings.
func PossibleValuesForReplicateMigrationState() []string {
	return []string{
		string(ReplicateMigrationStateACTIONREQUIRED),
		string(ReplicateMigrationStateCOMPLETE),
		string(ReplicateMigrationStateFAILED),
		string(ReplicateMigrationStatePENDING),
		string(ReplicateMigrationStateUNDEFINED),
		string(ReplicateMigrationStateVALIDATING),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *ReplicateMigrationState) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseReplicateMigrationState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseReplicateMigrationState matches input case-insensitively; unknown
// values pass through unchanged and the returned error is always nil.
func parseReplicateMigrationState(input string) (*ReplicateMigrationState, error) {
	vals := map[string]ReplicateMigrationState{
		"action_required": ReplicateMigrationStateACTIONREQUIRED,
		"complete":        ReplicateMigrationStateCOMPLETE,
		"failed":          ReplicateMigrationStateFAILED,
		"pending":         ReplicateMigrationStatePENDING,
		"undefined":       ReplicateMigrationStateUNDEFINED,
		"validating":      ReplicateMigrationStateVALIDATING,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := ReplicateMigrationState(input)
	return &out, nil
}

// ResultType is a string-backed enum; known values are listed below.
type ResultType string

const (
	ResultTypeCollection ResultType = "Collection"
	ResultTypeDatabase   ResultType = "Database"
	ResultTypeMigration  ResultType = "Migration"
)

// PossibleValuesForResultType returns the known values as strings.
func PossibleValuesForResultType() []string {
	return []string{
		string(ResultTypeCollection),
		string(ResultTypeDatabase),
		string(ResultTypeMigration),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *ResultType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseResultType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseResultType matches input case-insensitively; unknown values pass
// through unchanged and the returned error is always nil.
func parseResultType(input string) (*ResultType, error) {
	vals := map[string]ResultType{
		"collection": ResultTypeCollection,
		"database":   ResultTypeDatabase,
		"migration":  ResultTypeMigration,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := ResultType(input)
	return &out, nil
}

// ScenarioSource is a string-backed enum; known values are listed below.
type ScenarioSource string

const (
	ScenarioSourceAccess        ScenarioSource = "Access"
	ScenarioSourceDBTwo         ScenarioSource = "DB2"
	ScenarioSourceMongoDB       ScenarioSource = "MongoDB"
	ScenarioSourceMySQL         ScenarioSource = "MySQL"
	ScenarioSourceMySQLRDS      ScenarioSource = "MySQLRDS"
	ScenarioSourceOracle        ScenarioSource = "Oracle"
	ScenarioSourcePostgreSQL    ScenarioSource = "PostgreSQL"
	ScenarioSourcePostgreSQLRDS ScenarioSource = "PostgreSQLRDS"
	ScenarioSourceSQL           ScenarioSource = "SQL"
	ScenarioSourceSQLRDS        ScenarioSource = "SQLRDS"
	ScenarioSourceSybase        ScenarioSource = "Sybase"
)

// PossibleValuesForScenarioSource returns the known values as strings.
func PossibleValuesForScenarioSource() []string {
	return []string{
		string(ScenarioSourceAccess),
		string(ScenarioSourceDBTwo),
		string(ScenarioSourceMongoDB),
		string(ScenarioSourceMySQL),
		string(ScenarioSourceMySQLRDS),
		string(ScenarioSourceOracle),
		string(ScenarioSourcePostgreSQL),
		string(ScenarioSourcePostgreSQLRDS),
		string(ScenarioSourceSQL),
		string(ScenarioSourceSQLRDS),
		string(ScenarioSourceSybase),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *ScenarioSource) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseScenarioSource(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseScenarioSource matches input case-insensitively; unknown values pass
// through unchanged and the returned error is always nil.
func parseScenarioSource(input string) (*ScenarioSource, error) {
	vals := map[string]ScenarioSource{
		"access":        ScenarioSourceAccess,
		"db2":           ScenarioSourceDBTwo,
		"mongodb":       ScenarioSourceMongoDB,
		"mysql":         ScenarioSourceMySQL,
		"mysqlrds":      ScenarioSourceMySQLRDS,
		"oracle":        ScenarioSourceOracle,
		"postgresql":    ScenarioSourcePostgreSQL,
		"postgresqlrds": ScenarioSourcePostgreSQLRDS,
		"sql":           ScenarioSourceSQL,
		"sqlrds":        ScenarioSourceSQLRDS,
		"sybase":        ScenarioSourceSybase,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := ScenarioSource(input)
	return &out, nil
}

// ScenarioTarget is a string-backed enum; known values are listed below.
type ScenarioTarget string

const (
	ScenarioTargetAzureDBForMySql       ScenarioTarget = "AzureDBForMySql"
	ScenarioTargetAzureDBForPostgresSQL ScenarioTarget = "AzureDBForPostgresSQL"
	ScenarioTargetMongoDB               ScenarioTarget = "MongoDB"
	ScenarioTargetSQLDB                 ScenarioTarget = "SQLDB"
	ScenarioTargetSQLDW                 ScenarioTarget = "SQLDW"
	ScenarioTargetSQLMI                 ScenarioTarget = "SQLMI"
	ScenarioTargetSQLServer             ScenarioTarget = "SQLServer"
)

// PossibleValuesForScenarioTarget returns the known values as strings.
func PossibleValuesForScenarioTarget() []string {
	return []string{
		string(ScenarioTargetAzureDBForMySql),
		string(ScenarioTargetAzureDBForPostgresSQL),
		string(ScenarioTargetMongoDB),
		string(ScenarioTargetSQLDB),
		string(ScenarioTargetSQLDW),
		string(ScenarioTargetSQLMI),
		string(ScenarioTargetSQLServer),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *ScenarioTarget) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseScenarioTarget(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseScenarioTarget matches input case-insensitively; unknown values pass
// through unchanged and the returned error is always nil.
func parseScenarioTarget(input string) (*ScenarioTarget, error) {
	vals := map[string]ScenarioTarget{
		"azuredbformysql":       ScenarioTargetAzureDBForMySql,
		"azuredbforpostgressql": ScenarioTargetAzureDBForPostgresSQL,
		"mongodb":               ScenarioTargetMongoDB,
		"sqldb":                 ScenarioTargetSQLDB,
		"sqldw":                 ScenarioTargetSQLDW,
		"sqlmi":                 ScenarioTargetSQLMI,
		"sqlserver":             ScenarioTargetSQLServer,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := ScenarioTarget(input)
	return &out, nil
}

// ServerLevelPermissionsGroup is a string-backed enum; known values are listed below.
type ServerLevelPermissionsGroup string

const (
	ServerLevelPermissionsGroupDefault                             ServerLevelPermissionsGroup = "Default"
	ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL ServerLevelPermissionsGroup = "MigrationFromMySQLToAzureDBForMySQL"
	ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB     ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureDB"
	ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI     ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureMI"
	ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM     ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureVM"
)

// PossibleValuesForServerLevelPermissionsGroup returns the known values as strings.
func PossibleValuesForServerLevelPermissionsGroup() []string {
	return []string{
		string(ServerLevelPermissionsGroupDefault),
		string(ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL),
		string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB),
		string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI),
		string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *ServerLevelPermissionsGroup) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseServerLevelPermissionsGroup(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseServerLevelPermissionsGroup matches input case-insensitively; unknown
// values pass through unchanged and the returned error is always nil.
func parseServerLevelPermissionsGroup(input string) (*ServerLevelPermissionsGroup, error) {
	vals := map[string]ServerLevelPermissionsGroup{
		"default":                             ServerLevelPermissionsGroupDefault,
		"migrationfrommysqltoazuredbformysql": ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL,
		"migrationfromsqlservertoazuredb":     ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB,
		"migrationfromsqlservertoazuremi":     ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI,
		"migrationfromsqlservertoazurevm":     ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := ServerLevelPermissionsGroup(input)
	return &out, nil
}

// Severity is a string-backed enum; known values are listed below.
type Severity string

const (
	SeverityError   Severity = "Error"
	SeverityMessage Severity = "Message"
	SeverityWarning Severity = "Warning"
)

// PossibleValuesForSeverity returns the known values as strings.
func PossibleValuesForSeverity() []string {
	return []string{
		string(SeverityError),
		string(SeverityMessage),
		string(SeverityWarning),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *Severity) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseSeverity(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseSeverity matches input case-insensitively; unknown values pass
// through unchanged and the returned error is always nil.
func parseSeverity(input string) (*Severity, error) {
	vals := map[string]Severity{
		"error":   SeverityError,
		"message": SeverityMessage,
		"warning": SeverityWarning,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := Severity(input)
	return &out, nil
}

// SqlSourcePlatform is a string-backed enum with a single known value.
type SqlSourcePlatform string

const (
	SqlSourcePlatformSqlOnPrem SqlSourcePlatform = "SqlOnPrem"
)

// PossibleValuesForSqlSourcePlatform returns the known values as strings.
func PossibleValuesForSqlSourcePlatform() []string {
	return []string{
		string(SqlSourcePlatformSqlOnPrem),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *SqlSourcePlatform) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseSqlSourcePlatform(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseSqlSourcePlatform matches input case-insensitively; unknown values
// pass through unchanged and the returned error is always nil.
func parseSqlSourcePlatform(input string) (*SqlSourcePlatform, error) {
	vals := map[string]SqlSourcePlatform{
		"sqlonprem": SqlSourcePlatformSqlOnPrem,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := SqlSourcePlatform(input)
	return &out, nil
}

// SsisMigrationOverwriteOption is a string-backed enum; known values are listed below.
type SsisMigrationOverwriteOption string

const (
	SsisMigrationOverwriteOptionIgnore    SsisMigrationOverwriteOption = "Ignore"
	SsisMigrationOverwriteOptionOverwrite SsisMigrationOverwriteOption = "Overwrite"
)

// PossibleValuesForSsisMigrationOverwriteOption returns the known values as strings.
func PossibleValuesForSsisMigrationOverwriteOption() []string {
	return []string{
		string(SsisMigrationOverwriteOptionIgnore),
		string(SsisMigrationOverwriteOptionOverwrite),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *SsisMigrationOverwriteOption) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseSsisMigrationOverwriteOption(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseSsisMigrationOverwriteOption matches input case-insensitively; unknown
// values pass through unchanged and the returned error is always nil.
func parseSsisMigrationOverwriteOption(input string) (*SsisMigrationOverwriteOption, error) {
	vals := map[string]SsisMigrationOverwriteOption{
		"ignore":    SsisMigrationOverwriteOptionIgnore,
		"overwrite": SsisMigrationOverwriteOptionOverwrite,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := SsisMigrationOverwriteOption(input)
	return &out, nil
}

// SsisMigrationStage is a string-backed enum; known values are listed below.
type SsisMigrationStage string

const (
	SsisMigrationStageCompleted  SsisMigrationStage = "Completed"
	SsisMigrationStageInProgress SsisMigrationStage = "InProgress"
	SsisMigrationStageInitialize SsisMigrationStage = "Initialize"
	SsisMigrationStageNone       SsisMigrationStage = "None"
)

// PossibleValuesForSsisMigrationStage returns the known values as strings.
func PossibleValuesForSsisMigrationStage() []string {
	return []string{
		string(SsisMigrationStageCompleted),
		string(SsisMigrationStageInProgress),
		string(SsisMigrationStageInitialize),
		string(SsisMigrationStageNone),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *SsisMigrationStage) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseSsisMigrationStage(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseSsisMigrationStage matches input case-insensitively; unknown values
// pass through unchanged and the returned error is always nil.
func parseSsisMigrationStage(input string) (*SsisMigrationStage, error) {
	vals := map[string]SsisMigrationStage{
		"completed":  SsisMigrationStageCompleted,
		"inprogress": SsisMigrationStageInProgress,
		"initialize": SsisMigrationStageInitialize,
		"none":       SsisMigrationStageNone,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := SsisMigrationStage(input)
	return &out, nil
}

// SsisStoreType is a string-backed enum with a single known value.
type SsisStoreType string

const (
	SsisStoreTypeSsisCatalog SsisStoreType = "SsisCatalog"
)

// PossibleValuesForSsisStoreType returns the known values as strings.
func PossibleValuesForSsisStoreType() []string {
	return []string{
		string(SsisStoreTypeSsisCatalog),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *SsisStoreType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseSsisStoreType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseSsisStoreType matches input case-insensitively; unknown values pass
// through unchanged and the returned error is always nil.
func parseSsisStoreType(input string) (*SsisStoreType, error) {
	vals := map[string]SsisStoreType{
		"ssiscatalog": SsisStoreTypeSsisCatalog,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := SsisStoreType(input)
	return &out, nil
}

// SyncDatabaseMigrationReportingState is a string-backed enum; the wire
// values are upper-case with underscores.
type SyncDatabaseMigrationReportingState string

const (
	SyncDatabaseMigrationReportingStateBACKUPCOMPLETED    SyncDatabaseMigrationReportingState = "BACKUP_COMPLETED"
	SyncDatabaseMigrationReportingStateBACKUPINPROGRESS   SyncDatabaseMigrationReportingState = "BACKUP_IN_PROGRESS"
	SyncDatabaseMigrationReportingStateCANCELLED          SyncDatabaseMigrationReportingState = "CANCELLED"
	SyncDatabaseMigrationReportingStateCANCELLING         SyncDatabaseMigrationReportingState = "CANCELLING"
	SyncDatabaseMigrationReportingStateCOMPLETE           SyncDatabaseMigrationReportingState = "COMPLETE"
	SyncDatabaseMigrationReportingStateCOMPLETING         SyncDatabaseMigrationReportingState = "COMPLETING"
	SyncDatabaseMigrationReportingStateCONFIGURING        SyncDatabaseMigrationReportingState = "CONFIGURING"
	SyncDatabaseMigrationReportingStateFAILED             SyncDatabaseMigrationReportingState = "FAILED"
	// NOTE(review): "INITIALIAZING" appears misspelled but matches the wire
	// value used by the service API — do not "correct" it here.
	SyncDatabaseMigrationReportingStateINITIALIAZING      SyncDatabaseMigrationReportingState = "INITIALIAZING"
	SyncDatabaseMigrationReportingStateREADYTOCOMPLETE    SyncDatabaseMigrationReportingState = "READY_TO_COMPLETE"
	SyncDatabaseMigrationReportingStateRESTORECOMPLETED   SyncDatabaseMigrationReportingState = "RESTORE_COMPLETED"
	SyncDatabaseMigrationReportingStateRESTOREINPROGRESS  SyncDatabaseMigrationReportingState = "RESTORE_IN_PROGRESS"
	SyncDatabaseMigrationReportingStateRUNNING            SyncDatabaseMigrationReportingState = "RUNNING"
	SyncDatabaseMigrationReportingStateSTARTING           SyncDatabaseMigrationReportingState = "STARTING"
	SyncDatabaseMigrationReportingStateUNDEFINED          SyncDatabaseMigrationReportingState = "UNDEFINED"
	SyncDatabaseMigrationReportingStateVALIDATING         SyncDatabaseMigrationReportingState = "VALIDATING"
	SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE SyncDatabaseMigrationReportingState = "VALIDATION_COMPLETE"
	SyncDatabaseMigrationReportingStateVALIDATIONFAILED   SyncDatabaseMigrationReportingState = "VALIDATION_FAILED"
)

// PossibleValuesForSyncDatabaseMigrationReportingState returns the known values as strings.
func PossibleValuesForSyncDatabaseMigrationReportingState() []string {
	return []string{
		string(SyncDatabaseMigrationReportingStateBACKUPCOMPLETED),
		string(SyncDatabaseMigrationReportingStateBACKUPINPROGRESS),
		string(SyncDatabaseMigrationReportingStateCANCELLED),
		string(SyncDatabaseMigrationReportingStateCANCELLING),
		string(SyncDatabaseMigrationReportingStateCOMPLETE),
		string(SyncDatabaseMigrationReportingStateCOMPLETING),
		string(SyncDatabaseMigrationReportingStateCONFIGURING),
		string(SyncDatabaseMigrationReportingStateFAILED),
		string(SyncDatabaseMigrationReportingStateINITIALIAZING),
		string(SyncDatabaseMigrationReportingStateREADYTOCOMPLETE),
		string(SyncDatabaseMigrationReportingStateRESTORECOMPLETED),
		string(SyncDatabaseMigrationReportingStateRESTOREINPROGRESS),
		string(SyncDatabaseMigrationReportingStateRUNNING),
		string(SyncDatabaseMigrationReportingStateSTARTING),
		string(SyncDatabaseMigrationReportingStateUNDEFINED),
		string(SyncDatabaseMigrationReportingStateVALIDATING),
		string(SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE),
		string(SyncDatabaseMigrationReportingStateVALIDATIONFAILED),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *SyncDatabaseMigrationReportingState) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseSyncDatabaseMigrationReportingState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseSyncDatabaseMigrationReportingState matches input case-insensitively;
// unknown values pass through unchanged and the returned error is always nil.
func parseSyncDatabaseMigrationReportingState(input string) (*SyncDatabaseMigrationReportingState, error) {
	vals := map[string]SyncDatabaseMigrationReportingState{
		"backup_completed":    SyncDatabaseMigrationReportingStateBACKUPCOMPLETED,
		"backup_in_progress":  SyncDatabaseMigrationReportingStateBACKUPINPROGRESS,
		"cancelled":           SyncDatabaseMigrationReportingStateCANCELLED,
		"cancelling":          SyncDatabaseMigrationReportingStateCANCELLING,
		"complete":            SyncDatabaseMigrationReportingStateCOMPLETE,
		"completing":          SyncDatabaseMigrationReportingStateCOMPLETING,
		"configuring":         SyncDatabaseMigrationReportingStateCONFIGURING,
		"failed":              SyncDatabaseMigrationReportingStateFAILED,
		"initialiazing":       SyncDatabaseMigrationReportingStateINITIALIAZING,
		"ready_to_complete":   SyncDatabaseMigrationReportingStateREADYTOCOMPLETE,
		"restore_completed":   SyncDatabaseMigrationReportingStateRESTORECOMPLETED,
		"restore_in_progress": SyncDatabaseMigrationReportingStateRESTOREINPROGRESS,
		"running":             SyncDatabaseMigrationReportingStateRUNNING,
		"starting":            SyncDatabaseMigrationReportingStateSTARTING,
		"undefined":           SyncDatabaseMigrationReportingStateUNDEFINED,
		"validating":          SyncDatabaseMigrationReportingStateVALIDATING,
		"validation_complete": SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE,
		"validation_failed":   SyncDatabaseMigrationReportingStateVALIDATIONFAILED,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := SyncDatabaseMigrationReportingState(input)
	return &out, nil
}

// SyncTableMigrationState is a string-backed enum; the wire values are
// upper-case with underscores.
type SyncTableMigrationState string

const (
	SyncTableMigrationStateBEFORELOAD SyncTableMigrationState = "BEFORE_LOAD"
	SyncTableMigrationStateCANCELED   SyncTableMigrationState = "CANCELED"
	SyncTableMigrationStateCOMPLETED  SyncTableMigrationState = "COMPLETED"
	SyncTableMigrationStateERROR      SyncTableMigrationState = "ERROR"
	SyncTableMigrationStateFAILED     SyncTableMigrationState = "FAILED"
	SyncTableMigrationStateFULLLOAD   SyncTableMigrationState = "FULL_LOAD"
)

// PossibleValuesForSyncTableMigrationState returns the known values as strings.
func PossibleValuesForSyncTableMigrationState() []string {
	return []string{
		string(SyncTableMigrationStateBEFORELOAD),
		string(SyncTableMigrationStateCANCELED),
		string(SyncTableMigrationStateCOMPLETED),
		string(SyncTableMigrationStateERROR),
		string(SyncTableMigrationStateFAILED),
		string(SyncTableMigrationStateFULLLOAD),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *SyncTableMigrationState) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseSyncTableMigrationState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseSyncTableMigrationState matches input case-insensitively; unknown
// values pass through unchanged and the returned error is always nil.
func parseSyncTableMigrationState(input string) (*SyncTableMigrationState, error) {
	vals := map[string]SyncTableMigrationState{
		"before_load": SyncTableMigrationStateBEFORELOAD,
		"canceled":    SyncTableMigrationStateCANCELED,
		"completed":   SyncTableMigrationStateCOMPLETED,
		"error":       SyncTableMigrationStateERROR,
		"failed":      SyncTableMigrationStateFAILED,
		"full_load":   SyncTableMigrationStateFULLLOAD,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := SyncTableMigrationState(input)
	return &out, nil
}

// TaskState is a string-backed enum; known values are listed below.
type TaskState string

const (
	TaskStateCanceled              TaskState = "Canceled"
	TaskStateFailed                TaskState = "Failed"
	TaskStateFailedInputValidation TaskState = "FailedInputValidation"
	TaskStateFaulted               TaskState = "Faulted"
	TaskStateQueued                TaskState = "Queued"
	TaskStateRunning               TaskState = "Running"
	TaskStateSucceeded             TaskState = "Succeeded"
	TaskStateUnknown               TaskState = "Unknown"
)

// PossibleValuesForTaskState returns the known values as strings.
func PossibleValuesForTaskState() []string {
	return []string{
		string(TaskStateCanceled),
		string(TaskStateFailed),
		string(TaskStateFailedInputValidation),
		string(TaskStateFaulted),
		string(TaskStateQueued),
		string(TaskStateRunning),
		string(TaskStateSucceeded),
		string(TaskStateUnknown),
	}
}

// UnmarshalJSON implements json.Unmarshaler (case-insensitive, best-effort).
func (s *TaskState) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseTaskState(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseTaskState matches input case-insensitively; unknown values pass
// through unchanged and the returned error is always nil.
func parseTaskState(input string) (*TaskState, error) {
	vals := map[string]TaskState{
		"canceled":              TaskStateCanceled,
		"failed":                TaskStateFailed,
		"failedinputvalidation": TaskStateFailedInputValidation,
		"faulted":               TaskStateFaulted,
		"queued":                TaskStateQueued,
		"running":               TaskStateRunning,
		"succeeded":             TaskStateSucceeded,
		"unknown":               TaskStateUnknown,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := TaskState(input)
	return &out, nil
}

// TaskType is a string-backed enum; the constant list continues beyond this chunk.
type TaskType string

const (
	TaskTypeConnectPointMongoDb TaskType = "Connect.MongoDb"
TaskTypeConnectToSourcePointMySql TaskType = "ConnectToSource.MySql" + TaskTypeConnectToSourcePointOraclePointSync TaskType = "ConnectToSource.Oracle.Sync" + TaskTypeConnectToSourcePointPostgreSqlPointSync TaskType = "ConnectToSource.PostgreSql.Sync" + TaskTypeConnectToSourcePointSqlServer TaskType = "ConnectToSource.SqlServer" + TaskTypeConnectToSourcePointSqlServerPointSync TaskType = "ConnectToSource.SqlServer.Sync" + TaskTypeConnectToTargetPointAzureDbForMySql TaskType = "ConnectToTarget.AzureDbForMySql" + TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointAzureSqlDbMI TaskType = "ConnectToTarget.AzureSqlDbMI" + TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS TaskType = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointSqlDb TaskType = "ConnectToTarget.SqlDb" + TaskTypeConnectToTargetPointSqlDbPointSync TaskType = "ConnectToTarget.SqlDb.Sync" + TaskTypeGetTDECertificatesPointSql TaskType = "GetTDECertificates.Sql" + TaskTypeGetUserTablesMySql TaskType = "GetUserTablesMySql" + TaskTypeGetUserTablesOracle TaskType = "GetUserTablesOracle" + TaskTypeGetUserTablesPointAzureSqlDbPointSync TaskType = "GetUserTables.AzureSqlDb.Sync" + TaskTypeGetUserTablesPointSql TaskType = "GetUserTables.Sql" + TaskTypeGetUserTablesPostgreSql TaskType = "GetUserTablesPostgreSql" + TaskTypeMigratePointMongoDb TaskType = "Migrate.MongoDb" + TaskTypeMigratePointMySqlPointAzureDbForMySql TaskType = "Migrate.MySql.AzureDbForMySql" + TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync TaskType = "Migrate.MySql.AzureDbForMySql.Sync" + TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync TaskType = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo TaskType = 
"Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + TaskTypeMigratePointSqlServerPointAzureSqlDbMI TaskType = "Migrate.SqlServer.AzureSqlDbMI" + TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync TaskType = "Migrate.SqlServer.AzureSqlDb.Sync" + TaskTypeMigratePointSqlServerPointSqlDb TaskType = "Migrate.SqlServer.SqlDb" + TaskTypeMigratePointSsis TaskType = "Migrate.Ssis" + TaskTypeMigrateSchemaSqlServerSqlDb TaskType = "MigrateSchemaSqlServerSqlDb" + TaskTypeServicePointCheckPointOCI TaskType = "Service.Check.OCI" + TaskTypeServicePointInstallPointOCI TaskType = "Service.Install.OCI" + TaskTypeServicePointUploadPointOCI TaskType = "Service.Upload.OCI" + TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync TaskType = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + TaskTypeValidatePointMongoDb TaskType = "Validate.MongoDb" + TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync TaskType = "Validate.Oracle.AzureDbPostgreSql.Sync" +) + +func PossibleValuesForTaskType() []string { + return []string{ + string(TaskTypeConnectPointMongoDb), + string(TaskTypeConnectToSourcePointMySql), + string(TaskTypeConnectToSourcePointOraclePointSync), + string(TaskTypeConnectToSourcePointPostgreSqlPointSync), + string(TaskTypeConnectToSourcePointSqlServer), + string(TaskTypeConnectToSourcePointSqlServerPointSync), + string(TaskTypeConnectToTargetPointAzureDbForMySql), + string(TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointAzureSqlDbMI), + string(TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS), + 
string(TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointSqlDb), + string(TaskTypeConnectToTargetPointSqlDbPointSync), + string(TaskTypeGetTDECertificatesPointSql), + string(TaskTypeGetUserTablesMySql), + string(TaskTypeGetUserTablesOracle), + string(TaskTypeGetUserTablesPointAzureSqlDbPointSync), + string(TaskTypeGetUserTablesPointSql), + string(TaskTypeGetUserTablesPostgreSql), + string(TaskTypeMigratePointMongoDb), + string(TaskTypeMigratePointMySqlPointAzureDbForMySql), + string(TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync), + string(TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMI), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync), + string(TaskTypeMigratePointSqlServerPointSqlDb), + string(TaskTypeMigratePointSsis), + string(TaskTypeMigrateSchemaSqlServerSqlDb), + string(TaskTypeServicePointCheckPointOCI), + string(TaskTypeServicePointInstallPointOCI), + string(TaskTypeServicePointUploadPointOCI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync), + string(TaskTypeValidatePointMongoDb), + string(TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync), + } +} + +func (s *TaskType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTaskType(input string) (*TaskType, error) { + vals := map[string]TaskType{ + 
"connect.mongodb": TaskTypeConnectPointMongoDb, + "connecttosource.mysql": TaskTypeConnectToSourcePointMySql, + "connecttosource.oracle.sync": TaskTypeConnectToSourcePointOraclePointSync, + "connecttosource.postgresql.sync": TaskTypeConnectToSourcePointPostgreSqlPointSync, + "connecttosource.sqlserver": TaskTypeConnectToSourcePointSqlServer, + "connecttosource.sqlserver.sync": TaskTypeConnectToSourcePointSqlServerPointSync, + "connecttotarget.azuredbformysql": TaskTypeConnectToTargetPointAzureDbForMySql, + "connecttotarget.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync, + "connecttotarget.azuresqldbmi": TaskTypeConnectToTargetPointAzureSqlDbMI, + "connecttotarget.azuresqldbmi.sync.lrs": TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS, + "connecttotarget.oracle.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync, + "connecttotarget.sqldb": TaskTypeConnectToTargetPointSqlDb, + "connecttotarget.sqldb.sync": TaskTypeConnectToTargetPointSqlDbPointSync, + "gettdecertificates.sql": TaskTypeGetTDECertificatesPointSql, + "getusertablesmysql": TaskTypeGetUserTablesMySql, + "getusertablesoracle": TaskTypeGetUserTablesOracle, + "getusertables.azuresqldb.sync": TaskTypeGetUserTablesPointAzureSqlDbPointSync, + "getusertables.sql": TaskTypeGetUserTablesPointSql, + "getusertablespostgresql": TaskTypeGetUserTablesPostgreSql, + "migrate.mongodb": TaskTypeMigratePointMongoDb, + "migrate.mysql.azuredbformysql": TaskTypeMigratePointMySqlPointAzureDbForMySql, + "migrate.mysql.azuredbformysql.sync": TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync, + "migrate.oracle.azuredbforpostgresql.sync": TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync, + "migrate.postgresql.azuredbforpostgresql.syncv2": TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo, + "migrate.sqlserver.azuresqldbmi": TaskTypeMigratePointSqlServerPointAzureSqlDbMI, + 
"migrate.sqlserver.azuresqldbmi.sync.lrs": TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "migrate.sqlserver.azuresqldb.sync": TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync, + "migrate.sqlserver.sqldb": TaskTypeMigratePointSqlServerPointSqlDb, + "migrate.ssis": TaskTypeMigratePointSsis, + "migrateschemasqlserversqldb": TaskTypeMigrateSchemaSqlServerSqlDb, + "service.check.oci": TaskTypeServicePointCheckPointOCI, + "service.install.oci": TaskTypeServicePointInstallPointOCI, + "service.upload.oci": TaskTypeServicePointUploadPointOCI, + "validatemigrationinput.sqlserver.azuresqldbmi": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI, + "validatemigrationinput.sqlserver.azuresqldbmi.sync.lrs": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "validatemigrationinput.sqlserver.sqldb.sync": TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync, + "validate.mongodb": TaskTypeValidatePointMongoDb, + "validate.oracle.azuredbpostgresql.sync": TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskType(input) + return &out, nil +} + +type UpdateActionType string + +const ( + UpdateActionTypeAddedOnTarget UpdateActionType = "AddedOnTarget" + UpdateActionTypeChangedOnTarget UpdateActionType = "ChangedOnTarget" + UpdateActionTypeDeletedOnTarget UpdateActionType = "DeletedOnTarget" +) + +func PossibleValuesForUpdateActionType() []string { + return []string{ + string(UpdateActionTypeAddedOnTarget), + string(UpdateActionTypeChangedOnTarget), + string(UpdateActionTypeDeletedOnTarget), + } +} + +func (s *UpdateActionType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseUpdateActionType(decoded) + if err != nil { 
+ return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseUpdateActionType(input string) (*UpdateActionType, error) { + vals := map[string]UpdateActionType{ + "addedontarget": UpdateActionTypeAddedOnTarget, + "changedontarget": UpdateActionTypeChangedOnTarget, + "deletedontarget": UpdateActionTypeDeletedOnTarget, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UpdateActionType(input) + return &out, nil +} + +type ValidationStatus string + +const ( + ValidationStatusCompleted ValidationStatus = "Completed" + ValidationStatusCompletedWithIssues ValidationStatus = "CompletedWithIssues" + ValidationStatusDefault ValidationStatus = "Default" + ValidationStatusFailed ValidationStatus = "Failed" + ValidationStatusInProgress ValidationStatus = "InProgress" + ValidationStatusInitialized ValidationStatus = "Initialized" + ValidationStatusNotStarted ValidationStatus = "NotStarted" + ValidationStatusStopped ValidationStatus = "Stopped" +) + +func PossibleValuesForValidationStatus() []string { + return []string{ + string(ValidationStatusCompleted), + string(ValidationStatusCompletedWithIssues), + string(ValidationStatusDefault), + string(ValidationStatusFailed), + string(ValidationStatusInProgress), + string(ValidationStatusInitialized), + string(ValidationStatusNotStarted), + string(ValidationStatusStopped), + } +} + +func (s *ValidationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseValidationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseValidationStatus(input string) (*ValidationStatus, error) { + vals := map[string]ValidationStatus{ + "completed": ValidationStatusCompleted, + "completedwithissues": 
ValidationStatusCompletedWithIssues, + "default": ValidationStatusDefault, + "failed": ValidationStatusFailed, + "inprogress": ValidationStatusInProgress, + "initialized": ValidationStatusInitialized, + "notstarted": ValidationStatusNotStarted, + "stopped": ValidationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ValidationStatus(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/id_servicetask.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/id_servicetask.go new file mode 100644 index 00000000000..5e3466f84de --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/id_servicetask.go @@ -0,0 +1,139 @@ +package servicetaskresource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ServiceTaskId{}) +} + +var _ resourceids.ResourceId = &ServiceTaskId{} + +// ServiceTaskId is a struct representing the Resource ID for a Service Task +type ServiceTaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ServiceTaskName string +} + +// NewServiceTaskID returns a new ServiceTaskId struct +func NewServiceTaskID(subscriptionId string, resourceGroupName string, serviceName string, serviceTaskName string) ServiceTaskId { + return ServiceTaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ServiceTaskName: serviceTaskName, + } +} + +// ParseServiceTaskID parses 'input' into a ServiceTaskId +func ParseServiceTaskID(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceTaskIDInsensitively parses 'input' case-insensitively into a ServiceTaskId +// note: this method should only be used for API response data and not user input +func ParseServiceTaskIDInsensitively(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceTaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = 
input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ServiceTaskName, ok = input.Parsed["serviceTaskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceTaskName", input) + } + + return nil +} + +// ValidateServiceTaskID checks that 'input' can be parsed as a Service Task ID +func ValidateServiceTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service Task ID +func (id ServiceTaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/serviceTasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ServiceTaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service Task ID +func (id ServiceTaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + 
resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticServiceTasks", "serviceTasks", "serviceTasks"), + resourceids.UserSpecifiedSegment("serviceTaskName", "serviceTaskName"), + } +} + +// String returns a human-readable description of this Service Task ID +func (id ServiceTaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Service Task Name: %q", id.ServiceTaskName), + } + return fmt.Sprintf("Service Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/id_servicetask_test.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/id_servicetask_test.go new file mode 100644 index 00000000000..4707016df64 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/id_servicetask_test.go @@ -0,0 +1,327 @@ +package servicetaskresource + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &ServiceTaskId{} + +func TestNewServiceTaskID(t *testing.T) { + id := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ServiceTaskName != "serviceTaskName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceTaskName'", id.ServiceTaskName, "serviceTaskName") + } +} + +func TestFormatServiceTaskID(t *testing.T) { + actual := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseServiceTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // 
Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", 
v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestParseServiceTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI 
(mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ServiceTaskName: "sErViCeTaSkNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", 
v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestSegmentsForServiceTaskId(t *testing.T) { + segments := ServiceTaskId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceTaskId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetaskscancel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetaskscancel.go new file mode 100644 index 00000000000..571ce1ec259 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetaskscancel.go @@ -0,0 +1,54 @@ +package servicetaskresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServiceTasksCancelOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// ServiceTasksCancel ... 
func (c ServiceTaskResourceClient) ServiceTasksCancel(ctx context.Context, id ServiceTaskId) (result ServiceTasksCancelOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusOK,
		},
		HttpMethod: http.MethodPost,
		// cancellation is a POST to the task resource's `cancel` sub-path
		Path: fmt.Sprintf("%s/cancel", id.ID()),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		// populate the raw response/OData even on error so callers can inspect the status
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	var model ProjectTask
	result.Model = &model
	if err = resp.Unmarshal(result.Model); err != nil {
		return
	}

	return
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetaskscreateorupdate.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetaskscreateorupdate.go
new file mode 100644
index 00000000000..139e9010af9
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetaskscreateorupdate.go
@@ -0,0 +1,58 @@
package servicetaskresource

import (
	"context"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ServiceTasksCreateOrUpdateOperationResponse wraps the raw HTTP response, any
// OData metadata, and (on success) the created/updated ProjectTask.
type ServiceTasksCreateOrUpdateOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *ProjectTask
}

// ServiceTasksCreateOrUpdate PUTs the supplied ProjectTask to the resource ID;
// both 200 (updated) and 201 (created) are treated as success.
func (c ServiceTaskResourceClient) ServiceTasksCreateOrUpdate(ctx context.Context, id ServiceTaskId, input ProjectTask) (result ServiceTasksCreateOrUpdateOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusCreated,
			http.StatusOK,
		},
		HttpMethod: http.MethodPut,
		Path:       id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	// serialize the request payload before executing
	if err = req.Marshal(input); err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	var model ProjectTask
	result.Model = &model
	if err = resp.Unmarshal(result.Model); err != nil {
		return
	}

	return
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetasksdelete.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetasksdelete.go
new file mode 100644
index 00000000000..9012f747ae4
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetasksdelete.go
@@ -0,0 +1,77 @@
package servicetaskresource

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

type ServiceTasksDeleteOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
}

// ServiceTasksDeleteOperationOptions carries the optional query parameters for Delete.
type ServiceTasksDeleteOperationOptions struct {
	// DeleteRunningTasks maps to the `deleteRunningTasks` query parameter when set.
	DeleteRunningTasks *bool
}

func DefaultServiceTasksDeleteOperationOptions() ServiceTasksDeleteOperationOptions {
	return ServiceTasksDeleteOperationOptions{}
}

// ToHeaders contributes no additional request headers for this operation.
func (o ServiceTasksDeleteOperationOptions) ToHeaders() *client.Headers {
	out := client.Headers{}

	return &out
}

// ToOData contributes no OData query options for this operation.
func (o ServiceTasksDeleteOperationOptions) ToOData() *odata.Query {
	out := odata.Query{}

	return &out
}

// ToQuery renders the options as URL query parameters.
func (o ServiceTasksDeleteOperationOptions) ToQuery() *client.QueryParams {
	out := client.QueryParams{}
	if o.DeleteRunningTasks != nil {
		out.Append("deleteRunningTasks", fmt.Sprintf("%v", *o.DeleteRunningTasks))
	}
	return &out
}

// ServiceTasksDelete deletes the service task; 200 and 204 both count as success
// and no response body is decoded.
func (c ServiceTaskResourceClient) ServiceTasksDelete(ctx context.Context, id ServiceTaskId, options ServiceTasksDeleteOperationOptions) (result ServiceTasksDeleteOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusNoContent,
			http.StatusOK,
		},
		HttpMethod:    http.MethodDelete,
		OptionsObject: options,
		Path:          id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	return
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetasksget.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetasksget.go
new file mode 100644
index 00000000000..756b5616001
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetasksget.go
@@ -0,0 +1,83 @@
package servicetaskresource

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

type ServiceTasksGetOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *ProjectTask
}

// ServiceTasksGetOperationOptions carries the optional query parameters for Get.
type ServiceTasksGetOperationOptions struct {
	// Expand maps to the `$expand` query parameter when set.
	Expand *string
}

func DefaultServiceTasksGetOperationOptions() ServiceTasksGetOperationOptions {
	return ServiceTasksGetOperationOptions{}
}

// ToHeaders contributes no additional request headers for this operation.
func (o ServiceTasksGetOperationOptions) ToHeaders() *client.Headers {
	out := client.Headers{}

	return &out
}

// ToOData contributes no OData query options for this operation.
func (o ServiceTasksGetOperationOptions) ToOData() *odata.Query {
	out := odata.Query{}

	return &out
}

// ToQuery renders the options as URL query parameters.
func (o ServiceTasksGetOperationOptions) ToQuery() *client.QueryParams {
	out := client.QueryParams{}
	if o.Expand != nil {
		out.Append("$expand", fmt.Sprintf("%v", *o.Expand))
	}
	return &out
}

// ServiceTasksGet ...
func (c ServiceTaskResourceClient) ServiceTasksGet(ctx context.Context, id ServiceTaskId, options ServiceTasksGetOperationOptions) (result ServiceTasksGetOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusOK,
		},
		HttpMethod:    http.MethodGet,
		OptionsObject: options,
		Path:          id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		// populate the raw response/OData even on error so callers can inspect the status
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	var model ProjectTask
	result.Model = &model
	if err = resp.Unmarshal(result.Model); err != nil {
		return
	}

	return
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetasksupdate.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetasksupdate.go
new file mode 100644
index 00000000000..88c7b7ecf8c
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/method_servicetasksupdate.go
@@ -0,0 +1,57 @@
package servicetaskresource

import (
	"context"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ServiceTasksUpdateOperationResponse wraps the raw HTTP response, any OData
// metadata, and (on success) the updated ProjectTask.
type ServiceTasksUpdateOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *ProjectTask
}

// ServiceTasksUpdate PATCHes the supplied ProjectTask to the resource ID and
// decodes the updated task from a 200 response.
func (c ServiceTaskResourceClient) ServiceTasksUpdate(ctx context.Context, id ServiceTaskId, input ProjectTask) (result ServiceTasksUpdateOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusOK,
		},
		HttpMethod: http.MethodPatch,
		Path:       id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	// serialize the request payload before executing
	if err = req.Marshal(input); err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	var model ProjectTask
	result.Model = &model
	if err = resp.Unmarshal(result.Model); err != nil {
		return
	}

	return
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_azureactivedirectoryapp.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_azureactivedirectoryapp.go
new file mode 100644
index 00000000000..a8f8240eefc
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_azureactivedirectoryapp.go
@@ -0,0 +1,11 @@
package servicetaskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// AzureActiveDirectoryApp describes an Azure AD application registration used
// for authentication. NOTE(review): AppKey presumably holds a client secret —
// confirm against the service API before logging or persisting it.
type AzureActiveDirectoryApp struct {
	AppKey                 *string `json:"appKey,omitempty"`
	ApplicationId          *string `json:"applicationId,omitempty"`
	IgnoreAzurePermissions *bool   `json:"ignoreAzurePermissions,omitempty"`
	TenantId               *string `json:"tenantId,omitempty"`
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_backupfileinfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_backupfileinfo.go
new file mode 100644
index 00000000000..1fbddcbd0d8
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_backupfileinfo.go
@@ -0,0 +1,10 @@
package servicetaskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// BackupFileInfo describes a single file within a backup set.
type BackupFileInfo struct {
	FamilySequenceNumber *int64            `json:"familySequenceNumber,omitempty"`
	FileLocation         *string           `json:"fileLocation,omitempty"`
	Status               *BackupFileStatus `json:"status,omitempty"`
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_backupsetinfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_backupsetinfo.go
new file mode 100644
index 00000000000..ecf870b9e15
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_backupsetinfo.go
@@ -0,0 +1,59 @@
package servicetaskresource

import (
	"time"

	"github.com/hashicorp/go-azure-helpers/lang/dates"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// BackupSetInfo describes a database backup set. Date fields are kept as RFC 3339
// strings on the wire; use the As-Time accessors below to convert.
type BackupSetInfo struct {
	BackupFinishedDate *string           `json:"backupFinishedDate,omitempty"`
	BackupSetId        *string           `json:"backupSetId,omitempty"`
	BackupStartDate    *string           `json:"backupStartDate,omitempty"`
	BackupType         *BackupType       `json:"backupType,omitempty"`
	DatabaseName       *string           `json:"databaseName,omitempty"`
	FirstLsn           *string           `json:"firstLsn,omitempty"`
	IsBackupRestored   *bool             `json:"isBackupRestored,omitempty"`
	LastLsn            *string           `json:"lastLsn,omitempty"`
	LastModifiedTime   *string           `json:"lastModifiedTime,omitempty"`
	ListOfBackupFiles  *[]BackupFileInfo `json:"listOfBackupFiles,omitempty"`
}

// GetBackupFinishedDateAsTime parses BackupFinishedDate as RFC 3339; nil in, nil out.
func (o *BackupSetInfo) GetBackupFinishedDateAsTime() (*time.Time, error) {
	if o.BackupFinishedDate == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.BackupFinishedDate, "2006-01-02T15:04:05Z07:00")
}

// SetBackupFinishedDateAsTime stores input as an RFC 3339 string.
func (o *BackupSetInfo) SetBackupFinishedDateAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.BackupFinishedDate = &formatted
}

// GetBackupStartDateAsTime parses BackupStartDate as RFC 3339; nil in, nil out.
func (o *BackupSetInfo) GetBackupStartDateAsTime() (*time.Time, error) {
	if o.BackupStartDate == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.BackupStartDate, "2006-01-02T15:04:05Z07:00")
}

// SetBackupStartDateAsTime stores input as an RFC 3339 string.
func (o *BackupSetInfo) SetBackupStartDateAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.BackupStartDate = &formatted
}

// GetLastModifiedTimeAsTime parses LastModifiedTime as RFC 3339; nil in, nil out.
func (o *BackupSetInfo) GetLastModifiedTimeAsTime() (*time.Time, error) {
	if o.LastModifiedTime == nil {
		return nil, nil
	}
	return dates.ParseAsFormat(o.LastModifiedTime, "2006-01-02T15:04:05Z07:00")
}

// SetLastModifiedTimeAsTime stores input as an RFC 3339 string.
func (o *BackupSetInfo) SetLastModifiedTimeAsTime(input time.Time) {
	formatted := input.Format("2006-01-02T15:04:05Z07:00")
	o.LastModifiedTime = &formatted
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_blobshare.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_blobshare.go
new file mode 100644
index 00000000000..ab78f05ed9c
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_blobshare.go
@@ -0,0 +1,8 @@
package servicetaskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// BlobShare holds a SAS URI granting access to a blob container.
type BlobShare struct {
	SasUri *string `json:"sasUri,omitempty"`
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_commandproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_commandproperties.go
new file mode 100644
index 00000000000..032c35bd12e
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_commandproperties.go
@@ -0,0 +1,85 @@
package servicetaskresource

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// CommandProperties is the discriminated-union interface for task commands; the
// `commandType` JSON field selects the concrete implementation.
type CommandProperties interface {
	CommandProperties() BaseCommandPropertiesImpl
}

var _ CommandProperties = BaseCommandPropertiesImpl{}

// BaseCommandPropertiesImpl holds the fields shared by all command implementations.
type BaseCommandPropertiesImpl struct {
	CommandType CommandType     `json:"commandType"`
	Errors      *[]ODataError   `json:"errors,omitempty"`
	State       *CommandState   `json:"state,omitempty"`
}

func (s BaseCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl {
	return s
}

var _ CommandProperties = RawCommandPropertiesImpl{}

// RawCommandPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawCommandPropertiesImpl struct {
	commandProperties BaseCommandPropertiesImpl
	Type              string
	Values            map[string]interface{}
}

func (s RawCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl {
	return s.commandProperties
}

// UnmarshalCommandPropertiesImplementation inspects the `commandType` discriminator
// (case-insensitively) and decodes into the matching concrete command type, falling
// back to RawCommandPropertiesImpl for unknown discriminator values.
func UnmarshalCommandPropertiesImplementation(input []byte) (CommandProperties, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling CommandProperties into map[string]interface: %+v", err)
	}

	var value string
	if v, ok := temp["commandType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "Migrate.SqlServer.AzureDbSqlMi.Complete") {
		var out MigrateMISyncCompleteCommandProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMISyncCompleteCommandProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.Sync.Complete.Database") {
		var out MigrateSyncCompleteCommandProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSyncCompleteCommandProperties: %+v", err)
		}
		return out, nil
	}

	var parent BaseCommandPropertiesImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseCommandPropertiesImpl: %+v", err)
	}

	return RawCommandPropertiesImpl{
		commandProperties: parent,
		Type:              value,
		Values:            temp,
	}, nil

}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connectioninfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connectioninfo.go
new file mode 100644
index 00000000000..a2c64b788f0
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connectioninfo.go
@@ -0,0 +1,117 @@
package servicetaskresource

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ConnectionInfo is the discriminated-union interface for data-source connection
// settings; the `type` JSON field selects the concrete implementation.
type ConnectionInfo interface {
	ConnectionInfo() BaseConnectionInfoImpl
}

var _ ConnectionInfo = BaseConnectionInfoImpl{}

// BaseConnectionInfoImpl holds the fields shared by all connection implementations.
type BaseConnectionInfoImpl struct {
	Password *string `json:"password,omitempty"`
	Type     string  `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

func (s BaseConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl {
	return s
}

var _ ConnectionInfo = RawConnectionInfoImpl{}

// RawConnectionInfoImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawConnectionInfoImpl struct {
	connectionInfo BaseConnectionInfoImpl
	Type           string
	Values         map[string]interface{}
}

func (s RawConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl {
	return s.connectionInfo
}

// UnmarshalConnectionInfoImplementation inspects the `type` discriminator
// (case-insensitively) and decodes into the matching concrete connection type,
// falling back to RawConnectionInfoImpl for unknown discriminator values.
func UnmarshalConnectionInfoImplementation(input []byte) (ConnectionInfo, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectionInfo into map[string]interface: %+v", err)
	}

	var value string
	if v, ok := temp["type"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "MiSqlConnectionInfo") {
		var out MiSqlConnectionInfo
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MiSqlConnectionInfo: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "MongoDbConnectionInfo") {
		var out MongoDbConnectionInfo
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MongoDbConnectionInfo: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "MySqlConnectionInfo") {
		var out MySqlConnectionInfo
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MySqlConnectionInfo: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "OracleConnectionInfo") {
		var out OracleConnectionInfo
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into OracleConnectionInfo: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "PostgreSqlConnectionInfo") {
		var out PostgreSqlConnectionInfo
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into PostgreSqlConnectionInfo: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "SqlConnectionInfo") {
		var out SqlConnectionInfo
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into SqlConnectionInfo: %+v", err)
		}
		return out, nil
	}

	var parent BaseConnectionInfoImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseConnectionInfoImpl: %+v", err)
	}

	return RawConnectionInfoImpl{
		connectionInfo: parent,
		Type:           value,
		Values:         temp,
	}, nil

}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttomongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttomongodbtaskproperties.go
new file mode 100644
index 00000000000..9f08a9fd5e7
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttomongodbtaskproperties.go
@@ -0,0 +1,106 @@
package servicetaskresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ ProjectTaskProperties = ConnectToMongoDbTaskProperties{}

// ConnectToMongoDbTaskProperties is the ProjectTaskProperties implementation for
// the "Connect.MongoDb" task type.
type ConnectToMongoDbTaskProperties struct {
	Input  *MongoDbConnectionInfo `json:"input,omitempty"`
	Output *[]MongoDbClusterInfo  `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string    `json:"clientData,omitempty"`
	Commands   *[]CommandProperties  `json:"commands,omitempty"`
	Errors     *[]ODataError         `json:"errors,omitempty"`
	State      *TaskState            `json:"state,omitempty"`
	TaskType   TaskType              `json:"taskType"`
}

// ProjectTaskProperties returns the shared base fields, satisfying the interface.
func (s ConnectToMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToMongoDbTaskProperties{}

// MarshalJSON forces the discriminator to "Connect.MongoDb" regardless of the
// value stored in TaskType.
func (s ConnectToMongoDbTaskProperties) MarshalJSON() ([]byte, error) {
	// alias type avoids infinite recursion into this MarshalJSON
	type wrapper ConnectToMongoDbTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToMongoDbTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties: %+v", err)
	}

	decoded["taskType"] = "Connect.MongoDb"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToMongoDbTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToMongoDbTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then decodes Commands element
// by element through the CommandProperties discriminated-union unmarshaller.
func (s *ConnectToMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *MongoDbConnectionInfo `json:"input,omitempty"`
		Output     *[]MongoDbClusterInfo  `json:"output,omitempty"`
		ClientData *map[string]string     `json:"clientData,omitempty"`
		Errors     *[]ODataError          `json:"errors,omitempty"`
		State      *TaskState             `json:"state,omitempty"`
		TaskType   TaskType               `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToMongoDbTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcemysqltaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcemysqltaskinput.go
new file mode 100644
index 00000000000..7db7e4a33e2
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcemysqltaskinput.go
@@ -0,0 +1,11 @@
package servicetaskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ConnectToSourceMySqlTaskInput carries the source MySQL connection details plus
// optional permission-check and target-platform selectors.
type ConnectToSourceMySqlTaskInput struct {
	CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"`
	IsOfflineMigration    *bool                        `json:"isOfflineMigration,omitempty"`
	SourceConnectionInfo  MySqlConnectionInfo          `json:"sourceConnectionInfo"`
	TargetPlatform        *MySqlTargetPlatformType     `json:"targetPlatform,omitempty"`
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcemysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcemysqltaskproperties.go
new file mode 100644
index 00000000000..6b12975d40d
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcemysqltaskproperties.go
@@ -0,0 +1,106 @@
package servicetaskresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ ProjectTaskProperties = ConnectToSourceMySqlTaskProperties{}

// ConnectToSourceMySqlTaskProperties is the ProjectTaskProperties implementation
// for the "ConnectToSource.MySql" task type.
type ConnectToSourceMySqlTaskProperties struct {
	Input  *ConnectToSourceMySqlTaskInput     `json:"input,omitempty"`
	Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base fields, satisfying the interface.
func (s ConnectToSourceMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToSourceMySqlTaskProperties{}

// MarshalJSON forces the discriminator to "ConnectToSource.MySql" regardless of
// the value stored in TaskType.
func (s ConnectToSourceMySqlTaskProperties) MarshalJSON() ([]byte, error) {
	// alias type avoids infinite recursion into this MarshalJSON
	type wrapper ConnectToSourceMySqlTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToSourceMySqlTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties: %+v", err)
	}

	decoded["taskType"] = "ConnectToSource.MySql"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToSourceMySqlTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToSourceMySqlTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then decodes Commands element
// by element through the CommandProperties discriminated-union unmarshaller.
func (s *ConnectToSourceMySqlTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *ConnectToSourceMySqlTaskInput     `json:"input,omitempty"`
		Output     *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                 `json:"clientData,omitempty"`
		Errors     *[]ODataError                      `json:"errors,omitempty"`
		State      *TaskState                         `json:"state,omitempty"`
		TaskType   TaskType                           `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceMySqlTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcenonsqltaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcenonsqltaskoutput.go
new file mode 100644
index 00000000000..49f51bc87cc
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcenonsqltaskoutput.go
@@ -0,0 +1,12 @@
package servicetaskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ConnectToSourceNonSqlTaskOutput is the per-server result of a non-SQL
// connect-to-source task (databases discovered, server details, validation errors).
type ConnectToSourceNonSqlTaskOutput struct {
	Databases                *[]string              `json:"databases,omitempty"`
	Id                       *string                `json:"id,omitempty"`
	ServerProperties         *ServerProperties      `json:"serverProperties,omitempty"`
	SourceServerBrandVersion *string                `json:"sourceServerBrandVersion,omitempty"`
	ValidationErrors         *[]ReportableException `json:"validationErrors,omitempty"`
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourceoraclesynctaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourceoraclesynctaskinput.go
new file mode 100644
index 00000000000..bb995544622
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourceoraclesynctaskinput.go
@@ -0,0 +1,8 @@
package servicetaskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ConnectToSourceOracleSyncTaskInput carries the required source Oracle connection details.
type ConnectToSourceOracleSyncTaskInput struct {
	SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"`
}
diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourceoraclesynctaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourceoraclesynctaskoutput.go
new file mode 100644
index 00000000000..213a3392394
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourceoraclesynctaskoutput.go
@@ -0,0 +1,11 @@
package servicetaskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +type ConnectToSourceOracleSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourceoraclesynctaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourceoraclesynctaskproperties.go new file mode 100644 index 00000000000..4baa3251c97 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourceoraclesynctaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceOracleSyncTaskProperties{} + +type ConnectToSourceOracleSyncTaskProperties struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceOracleSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceOracleSyncTaskProperties{} + +func (s ConnectToSourceOracleSyncTaskProperties) MarshalJSON() ([]byte, 
error) { + type wrapper ConnectToSourceOracleSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.Oracle.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceOracleSyncTaskProperties{} + +func (s *ConnectToSourceOracleSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err 
:= UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceOracleSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcepostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcepostgresqlsynctaskinput.go new file mode 100644 index 00000000000..71b6eeeff8c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcepostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourcePostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcepostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcepostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..ed0a4b41937 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcepostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourcePostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcepostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcepostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..0e2412bbf23 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcepostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToSourcePostgreSqlSyncTaskProperties{} + +type ConnectToSourcePostgreSqlSyncTaskProperties struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourcePostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.PostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s *ConnectToSourcePostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output 
*[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourcePostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlserversynctaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlserversynctaskproperties.go new file mode 100644 index 00000000000..b80ca69cdbe --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlserversynctaskproperties.go @@ -0,0 +1,121 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerSyncTaskProperties{} + +type ConnectToSourceSqlServerSyncTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerSyncTaskProperties{} + +func (s ConnectToSourceSqlServerSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.SqlServer.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerSyncTaskProperties{} + +func (s *ConnectToSourceSqlServerSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput 
`json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskinput.go 
b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskinput.go new file mode 100644 index 00000000000..ceea5261aca --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskinput.go @@ -0,0 +1,15 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceSqlServerTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectDatabases *bool `json:"collectDatabases,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + CollectTdeCertificateInfo *bool `json:"collectTdeCertificateInfo,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutput.go new file mode 100644 index 00000000000..62d816134d4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutput.go @@ -0,0 +1,100 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceSqlServerTaskOutput interface { + ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl +} + +var _ ConnectToSourceSqlServerTaskOutput = BaseConnectToSourceSqlServerTaskOutputImpl{} + +type BaseConnectToSourceSqlServerTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s +} + +var _ ConnectToSourceSqlServerTaskOutput = RawConnectToSourceSqlServerTaskOutputImpl{} + +// RawConnectToSourceSqlServerTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectToSourceSqlServerTaskOutputImpl struct { + connectToSourceSqlServerTaskOutput BaseConnectToSourceSqlServerTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s.connectToSourceSqlServerTaskOutput +} + +func UnmarshalConnectToSourceSqlServerTaskOutputImplementation(input []byte) (ConnectToSourceSqlServerTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TaskLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputTaskLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + return out, nil + } + + var parent BaseConnectToSourceSqlServerTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectToSourceSqlServerTaskOutputImpl: %+v", err) + } + + return RawConnectToSourceSqlServerTaskOutputImpl{ + connectToSourceSqlServerTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutputagentjoblevel.go new file mode 100644 index 00000000000..b523b90de83 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +type ConnectToSourceSqlServerTaskOutputAgentJobLevel struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + JobCategory *string `json:"jobCategory,omitempty"` + JobOwner *string `json:"jobOwner,omitempty"` + LastExecutedOn *string `json:"lastExecutedOn,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutputdatabaselevel.go new file mode 100644 index 00000000000..9dec0a29cbd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutputdatabaselevel.go @@ -0,0 +1,56 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +type ConnectToSourceSqlServerTaskOutputDatabaseLevel struct { + CompatibilityLevel *DatabaseCompatLevel `json:"compatibilityLevel,omitempty"` + DatabaseFiles *[]DatabaseFileInfo `json:"databaseFiles,omitempty"` + DatabaseState *DatabaseState `json:"databaseState,omitempty"` + Name *string `json:"name,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutputloginlevel.go new file mode 100644 index 00000000000..9ceb6f0477a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutputloginlevel.go @@ -0,0 +1,56 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +type ConnectToSourceSqlServerTaskOutputLoginLevel struct { + DefaultDatabase *string `json:"defaultDatabase,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + LoginType *LoginType `json:"loginType,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputLoginLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutputtasklevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutputtasklevel.go new file mode 100644 index 00000000000..2d2bdb51fff --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskoutputtasklevel.go @@ -0,0 +1,58 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +type ConnectToSourceSqlServerTaskOutputTaskLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + DatabaseTdeCertificateMapping *map[string]string `json:"databaseTdeCertificateMapping,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputTaskLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + 
decoded["resultType"] = "TaskLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskproperties.go new file mode 100644 index 00000000000..5ba0f12a5d9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttosourcesqlservertaskproperties.go @@ -0,0 +1,124 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerTaskProperties{} + +type ConnectToSourceSqlServerTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskProperties{} + +func (s ConnectToSourceSqlServerTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskProperties + wrapped 
var _ json.Unmarshaler = &ConnectToSourceSqlServerTaskProperties{}

// UnmarshalJSON implements json.Unmarshaler. It decodes the statically-typed
// fields in one pass, then re-reads the payload as raw JSON so the
// polymorphic "commands" and "output" arrays can be resolved to their
// concrete implementations via the Unmarshal*Implementation helpers.
func (s *ConnectToSourceSqlServerTaskProperties) UnmarshalJSON(bytes []byte) error {
	// First pass: fields with concrete types. "commands" and "output" are
	// discriminated unions and are deliberately absent here.
	var decoded struct {
		Input      *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"`
		TaskId     *string                            `json:"taskId,omitempty"`
		ClientData *map[string]string                 `json:"clientData,omitempty"`
		Errors     *[]ODataError                      `json:"errors,omitempty"`
		State      *TaskState                         `json:"state,omitempty"`
		TaskType   TaskType                           `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.TaskId = decoded.TaskId
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: keep each top-level field as raw JSON so the union
	// fields can be unmarshalled element-by-element below.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		// Each element carries its own discriminator; resolve it to the
		// matching CommandProperties implementation.
		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		// Output elements are likewise polymorphic for this task type.
		output := make([]ConnectToSourceSqlServerTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}
// ConnectToTargetAzureDbForMySqlTaskInput is the input for the
// "ConnectToTarget.AzureDbForMySql" connectivity-check task: source and
// target MySQL connection details, plus whether the migration is offline.
type ConnectToTargetAzureDbForMySqlTaskInput struct {
	IsOfflineMigration   *bool               `json:"isOfflineMigration,omitempty"`
	SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"`
	TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"`
}

// ConnectToTargetAzureDbForMySqlTaskOutput is one output element of the
// "ConnectToTarget.AzureDbForMySql" task. All fields are server-populated
// and therefore optional pointers.
type ConnectToTargetAzureDbForMySqlTaskOutput struct {
	Databases                *[]string              `json:"databases,omitempty"`
	Id                       *string                `json:"id,omitempty"`
	ServerVersion            *string                `json:"serverVersion,omitempty"`
	TargetServerBrandVersion *string                `json:"targetServerBrandVersion,omitempty"`
	ValidationErrors         *[]ReportableException `json:"validationErrors,omitempty"`
}
var _ ProjectTaskProperties = ConnectToTargetAzureDbForMySqlTaskProperties{}

// ConnectToTargetAzureDbForMySqlTaskProperties is the ProjectTaskProperties
// implementation for the "ConnectToTarget.AzureDbForMySql" task type.
type ConnectToTargetAzureDbForMySqlTaskProperties struct {
	Input  *ConnectToTargetAzureDbForMySqlTaskInput    `json:"input,omitempty"`
	Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base-type fields as a
// BaseProjectTaskPropertiesImpl, satisfying the ProjectTaskProperties interface.
func (s ConnectToTargetAzureDbForMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToTargetAzureDbForMySqlTaskProperties{}

// MarshalJSON implements json.Marshaler, forcing the "taskType" discriminator
// to this type's fixed value before the payload is emitted.
func (s ConnectToTargetAzureDbForMySqlTaskProperties) MarshalJSON() ([]byte, error) {
	// The wrapper alias sheds this MarshalJSON method so the inner
	// json.Marshal call cannot recurse back into it.
	type wrapper ConnectToTargetAzureDbForMySqlTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err)
	}

	// Overwrite the discriminator with this concrete type's value.
	decoded["taskType"] = "ConnectToTarget.AzureDbForMySql"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToTargetAzureDbForMySqlTaskProperties{}

// UnmarshalJSON implements json.Unmarshaler: typed fields first, then the
// polymorphic "commands" list via a raw-JSON second pass.
func (s *ConnectToTargetAzureDbForMySqlTaskProperties) UnmarshalJSON(bytes []byte) error {
	// "commands" is a discriminated union and is decoded separately below.
	var decoded struct {
		Input      *ConnectToTargetAzureDbForMySqlTaskInput    `json:"input,omitempty"`
		Output     *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                          `json:"clientData,omitempty"`
		Errors     *[]ODataError                               `json:"errors,omitempty"`
		State      *TaskState                                  `json:"state,omitempty"`
		TaskType   TaskType                                    `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		// Resolve each element to its concrete CommandProperties implementation.
		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForMySqlTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
// ConnectToTargetAzureDbForPostgreSqlSyncTaskInput is the input for the
// "ConnectToTarget.AzureDbForPostgreSql.Sync" connectivity-check task:
// source and target PostgreSQL connection details.
type ConnectToTargetAzureDbForPostgreSqlSyncTaskInput struct {
	SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"`
	TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"`
}

// ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput is one output element of
// the "ConnectToTarget.AzureDbForPostgreSql.Sync" task. All fields are
// server-populated and therefore optional pointers.
type ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput struct {
	Databases                *[]string              `json:"databases,omitempty"`
	Id                       *string                `json:"id,omitempty"`
	TargetServerBrandVersion *string                `json:"targetServerBrandVersion,omitempty"`
	TargetServerVersion      *string                `json:"targetServerVersion,omitempty"`
	ValidationErrors         *[]ReportableException `json:"validationErrors,omitempty"`
}
var _ ProjectTaskProperties = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{}

// ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties is the
// ProjectTaskProperties implementation for the
// "ConnectToTarget.AzureDbForPostgreSql.Sync" task type.
type ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties struct {
	Input  *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput    `json:"input,omitempty"`
	Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base-type fields as a
// BaseProjectTaskPropertiesImpl, satisfying the ProjectTaskProperties interface.
func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{}

// MarshalJSON implements json.Marshaler, forcing the "taskType" discriminator
// to this type's fixed value before the payload is emitted.
func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The wrapper alias sheds this MarshalJSON method so the inner
	// json.Marshal call cannot recurse back into it.
	type wrapper ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	// Overwrite the discriminator with this concrete type's value.
	decoded["taskType"] = "ConnectToTarget.AzureDbForPostgreSql.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{}

// UnmarshalJSON implements json.Unmarshaler: typed fields first, then the
// polymorphic "commands" list via a raw-JSON second pass.
func (s *ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	// "commands" is a discriminated union and is decoded separately below.
	var decoded struct {
		Input      *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput    `json:"input,omitempty"`
		Output     *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                                   `json:"clientData,omitempty"`
		Errors     *[]ODataError                                        `json:"errors,omitempty"`
		State      *TaskState                                           `json:"state,omitempty"`
		TaskType   TaskType                                             `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		// Resolve each element to its concrete CommandProperties implementation.
		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}
// ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput is the input for the
// "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" task: only the target
// PostgreSQL connection details are required.
type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput struct {
	TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"`
}
// ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput is one output
// element of the "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" task.
// All fields are server-populated and therefore optional pointers.
type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput struct {
	DatabaseSchemaMap        *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined `json:"databaseSchemaMap,omitempty"`
	Databases                *[]string                                                                          `json:"databases,omitempty"`
	TargetServerBrandVersion *string                                                                            `json:"targetServerBrandVersion,omitempty"`
	TargetServerVersion      *string                                                                            `json:"targetServerVersion,omitempty"`
	ValidationErrors         *[]ReportableException                                                             `json:"validationErrors,omitempty"`
}
// ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined
// maps one database name to the list of schemas it contains, as reported in
// the task output's databaseSchemaMap field.
type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined struct {
	Database *string   `json:"database,omitempty"`
	Schemas  *[]string `json:"schemas,omitempty"`
}

var _ ProjectTaskProperties = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{}

// ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties is the
// ProjectTaskProperties implementation for the
// "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" task type.
type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties struct {
	Input  *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput    `json:"input,omitempty"`
	Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base-type fields as a
// BaseProjectTaskPropertiesImpl, satisfying the ProjectTaskProperties interface.
func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{}

// MarshalJSON implements json.Marshaler, forcing the "taskType" discriminator
// to this type's fixed value before the payload is emitted.
func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The wrapper alias sheds this MarshalJSON method so the inner
	// json.Marshal call cannot recurse back into it.
	type wrapper ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	// Overwrite the discriminator with this concrete type's value.
	decoded["taskType"] = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{}

// UnmarshalJSON implements json.Unmarshaler: typed fields first, then the
// polymorphic "commands" list via a raw-JSON second pass.
func (s *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	// "commands" is a discriminated union and is decoded separately below.
	var decoded struct {
		Input      *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput    `json:"input,omitempty"`
		Output     *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                                         `json:"clientData,omitempty"`
		Errors     *[]ODataError                                              `json:"errors,omitempty"`
		State      *TaskState                                                 `json:"state,omitempty"`
		TaskType   TaskType                                                   `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		// Resolve each element to its concrete CommandProperties implementation.
		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}

// ConnectToTargetSqlDbTaskInput is the input for the "ConnectToTarget.SqlDb"
// connectivity-check task: the target SQL connection details and whether to
// query per-database object counts.
type ConnectToTargetSqlDbTaskInput struct {
	QueryObjectCounts    *bool             `json:"queryObjectCounts,omitempty"`
	TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"`
}
// ConnectToTargetSqlDbTaskOutput is one output element of the
// "ConnectToTarget.SqlDb" task. All fields are server-populated and
// therefore optional pointers.
type ConnectToTargetSqlDbTaskOutput struct {
	Databases                *map[string]string `json:"databases,omitempty"`
	Id                       *string            `json:"id,omitempty"`
	TargetServerBrandVersion *string            `json:"targetServerBrandVersion,omitempty"`
	TargetServerVersion      *string            `json:"targetServerVersion,omitempty"`
}

var _ ProjectTaskProperties = ConnectToTargetSqlDbTaskProperties{}

// ConnectToTargetSqlDbTaskProperties is the ProjectTaskProperties
// implementation for the "ConnectToTarget.SqlDb" task type. Unlike the
// sibling task types in this package it also carries a CreatedOn timestamp.
type ConnectToTargetSqlDbTaskProperties struct {
	CreatedOn *string                           `json:"createdOn,omitempty"`
	Input     *ConnectToTargetSqlDbTaskInput    `json:"input,omitempty"`
	Output    *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base-type fields as a
// BaseProjectTaskPropertiesImpl, satisfying the ProjectTaskProperties interface.
func (s ConnectToTargetSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToTargetSqlDbTaskProperties{}

// MarshalJSON implements json.Marshaler, forcing the "taskType" discriminator
// to this type's fixed value before the payload is emitted.
func (s ConnectToTargetSqlDbTaskProperties) MarshalJSON() ([]byte, error) {
	// The wrapper alias sheds this MarshalJSON method so the inner
	// json.Marshal call cannot recurse back into it.
	type wrapper ConnectToTargetSqlDbTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToTargetSqlDbTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties: %+v", err)
	}

	// Overwrite the discriminator with this concrete type's value.
	decoded["taskType"] = "ConnectToTarget.SqlDb"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlDbTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToTargetSqlDbTaskProperties{}

// UnmarshalJSON implements json.Unmarshaler: typed fields first, then the
// polymorphic "commands" list via a raw-JSON second pass.
func (s *ConnectToTargetSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error {
	// "commands" is a discriminated union and is decoded separately below.
	var decoded struct {
		CreatedOn  *string                           `json:"createdOn,omitempty"`
		Input      *ConnectToTargetSqlDbTaskInput    `json:"input,omitempty"`
		Output     *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                `json:"clientData,omitempty"`
		Errors     *[]ODataError                     `json:"errors,omitempty"`
		State      *TaskState                        `json:"state,omitempty"`
		TaskType   TaskType                          `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.CreatedOn = decoded.CreatedOn
	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		// Resolve each element to its concrete CommandProperties implementation.
		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlDbTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}

// ConnectToTargetSqlMISyncTaskInput is the input for the
// "ConnectToTarget.AzureSqlDbMI.Sync.LRS" task: the Azure AD application
// used for authentication and the target managed-instance connection details.
type ConnectToTargetSqlMISyncTaskInput struct {
	AzureApp             AzureActiveDirectoryApp `json:"azureApp"`
	TargetConnectionInfo MiSqlConnectionInfo     `json:"targetConnectionInfo"`
}
// ConnectToTargetSqlMISyncTaskOutput is one output element of the
// "ConnectToTarget.AzureSqlDbMI.Sync.LRS" task. All fields are
// server-populated and therefore optional pointers.
type ConnectToTargetSqlMISyncTaskOutput struct {
	TargetServerBrandVersion *string                `json:"targetServerBrandVersion,omitempty"`
	TargetServerVersion      *string                `json:"targetServerVersion,omitempty"`
	ValidationErrors         *[]ReportableException `json:"validationErrors,omitempty"`
}

var _ ProjectTaskProperties = ConnectToTargetSqlMISyncTaskProperties{}

// ConnectToTargetSqlMISyncTaskProperties is the ProjectTaskProperties
// implementation for the "ConnectToTarget.AzureSqlDbMI.Sync.LRS" task type.
type ConnectToTargetSqlMISyncTaskProperties struct {
	Input  *ConnectToTargetSqlMISyncTaskInput    `json:"input,omitempty"`
	Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base-type fields as a
// BaseProjectTaskPropertiesImpl, satisfying the ProjectTaskProperties interface.
func (s ConnectToTargetSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = ConnectToTargetSqlMISyncTaskProperties{}

// MarshalJSON implements json.Marshaler, forcing the "taskType" discriminator
// to this type's fixed value before the payload is emitted.
func (s ConnectToTargetSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The wrapper alias sheds this MarshalJSON method so the inner
	// json.Marshal call cannot recurse back into it.
	type wrapper ConnectToTargetSqlMISyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err)
	}

	// Overwrite the discriminator with this concrete type's value.
	decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI.Sync.LRS"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &ConnectToTargetSqlMISyncTaskProperties{}

// UnmarshalJSON implements json.Unmarshaler: typed fields first, then the
// polymorphic "commands" list via a raw-JSON second pass.
func (s *ConnectToTargetSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	// "commands" is a discriminated union and is decoded separately below.
	var decoded struct {
		Input      *ConnectToTargetSqlMISyncTaskInput    `json:"input,omitempty"`
		Output     *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"`
		ClientData *map[string]string                    `json:"clientData,omitempty"`
		Errors     *[]ODataError                         `json:"errors,omitempty"`
		State      *TaskState                            `json:"state,omitempty"`
		TaskType   TaskType                              `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.Output = decoded.Output
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		// Resolve each element to its concrete CommandProperties implementation.
		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMISyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}

// ConnectToTargetSqlMITaskInput is the input for the
// "ConnectToTarget.AzureSqlDbMI" task: target connection details plus flags
// controlling collection of agent jobs, logins and SSIS-catalog validation.
type ConnectToTargetSqlMITaskInput struct {
	CollectAgentJobs        *bool             `json:"collectAgentJobs,omitempty"`
	CollectLogins           *bool             `json:"collectLogins,omitempty"`
	TargetConnectionInfo    SqlConnectionInfo `json:"targetConnectionInfo"`
	ValidateSsisCatalogOnly *bool             `json:"validateSsisCatalogOnly,omitempty"`
}
+ +type ConnectToTargetSqlMITaskOutput struct { + AgentJobs *[]string `json:"agentJobs,omitempty"` + Id *string `json:"id,omitempty"` + Logins *[]string `json:"logins,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttotargetsqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttotargetsqlmitaskproperties.go new file mode 100644 index 00000000000..d0da26e3171 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttotargetsqlmitaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetSqlMITaskProperties{} + +type ConnectToTargetSqlMITaskProperties struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlMITaskProperties{} + +func (s ConnectToTargetSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlMITaskProperties{} + +func (s *ConnectToTargetSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttotargetsqlsqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttotargetsqlsqldbsynctaskinput.go new file mode 100644 index 00000000000..bde5a00a2be --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttotargetsqlsqldbsynctaskinput.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlSqlDbSyncTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttotargetsqlsqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttotargetsqlsqldbsynctaskproperties.go new file mode 100644 index 00000000000..c5ede813a9e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_connecttotargetsqlsqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +type ConnectToTargetSqlSqlDbSyncTaskProperties struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s *ConnectToTargetSqlSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return 
fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_databasebackupinfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_databasebackupinfo.go new file mode 100644 index 00000000000..6a0659b8288 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_databasebackupinfo.go @@ -0,0 +1,33 @@ +package servicetaskresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseBackupInfo struct { + BackupFiles *[]string `json:"backupFiles,omitempty"` + BackupFinishDate *string `json:"backupFinishDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FamilyCount *int64 `json:"familyCount,omitempty"` + IsCompressed *bool `json:"isCompressed,omitempty"` + IsDamaged *bool `json:"isDamaged,omitempty"` + Position *int64 `json:"position,omitempty"` +} + +func (o *DatabaseBackupInfo) GetBackupFinishDateAsTime() (*time.Time, error) { + if o.BackupFinishDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseBackupInfo) SetBackupFinishDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishDate = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_databasefileinfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_databasefileinfo.go new file mode 100644 index 00000000000..045b908d873 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_databasefileinfo.go @@ -0,0 +1,14 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseFileInfo struct { + DatabaseName *string `json:"databaseName,omitempty"` + FileType *DatabaseFileType `json:"fileType,omitempty"` + Id *string `json:"id,omitempty"` + LogicalName *string `json:"logicalName,omitempty"` + PhysicalFullName *string `json:"physicalFullName,omitempty"` + RestoreFullName *string `json:"restoreFullName,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_databasesummaryresult.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_databasesummaryresult.go new file mode 100644 index 00000000000..d98e6ec7740 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_databasesummaryresult.go @@ -0,0 +1,47 @@ +package servicetaskresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_databasetable.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_databasetable.go new file mode 100644 index 00000000000..34900c25a3b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_databasetable.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseTable struct { + HasRows *bool `json:"hasRows,omitempty"` + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_dataintegrityvalidationresult.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_dataintegrityvalidationresult.go new file mode 100644 index 00000000000..cb36c432b5a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_dataintegrityvalidationresult.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataIntegrityValidationResult struct { + FailedObjects *map[string]string `json:"failedObjects,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_dataitemmigrationsummaryresult.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_dataitemmigrationsummaryresult.go new file mode 100644 index 00000000000..69f12558912 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_dataitemmigrationsummaryresult.go @@ -0,0 +1,46 @@ +package servicetaskresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataItemMigrationSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DataItemMigrationSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DataItemMigrationSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_executionstatistics.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_executionstatistics.go new file mode 100644 index 00000000000..19a7747214d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_executionstatistics.go @@ -0,0 +1,13 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ExecutionStatistics struct { + CpuTimeMs *float64 `json:"cpuTimeMs,omitempty"` + ElapsedTimeMs *float64 `json:"elapsedTimeMs,omitempty"` + ExecutionCount *int64 `json:"executionCount,omitempty"` + HasErrors *bool `json:"hasErrors,omitempty"` + SqlErrors *[]string `json:"sqlErrors,omitempty"` + WaitStats *map[string]WaitStatistics `json:"waitStats,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_fileshare.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_fileshare.go new file mode 100644 index 00000000000..d8e3358879a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_fileshare.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FileShare struct { + Password *string `json:"password,omitempty"` + Path string `json:"path"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_gettdecertificatessqltaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_gettdecertificatessqltaskinput.go new file mode 100644 index 00000000000..ef102fc92ee --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_gettdecertificatessqltaskinput.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetTdeCertificatesSqlTaskInput struct { + BackupFileShare FileShare `json:"backupFileShare"` + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + SelectedCertificates []SelectedCertificateInput `json:"selectedCertificates"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_gettdecertificatessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_gettdecertificatessqltaskoutput.go new file mode 100644 index 00000000000..304043c9b6e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_gettdecertificatessqltaskoutput.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetTdeCertificatesSqlTaskOutput struct { + Base64EncodedCertificates *map[string][]string `json:"base64EncodedCertificates,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_gettdecertificatessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_gettdecertificatessqltaskproperties.go new file mode 100644 index 00000000000..75d4f79552a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_gettdecertificatessqltaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetTdeCertificatesSqlTaskProperties{} + +type GetTdeCertificatesSqlTaskProperties struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetTdeCertificatesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetTdeCertificatesSqlTaskProperties{} + +func (s GetTdeCertificatesSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetTdeCertificatesSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetTDECertificates.Sql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetTdeCertificatesSqlTaskProperties{} + +func (s *GetTdeCertificatesSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetTdeCertificatesSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesmysqltaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesmysqltaskinput.go new file mode 100644 index 00000000000..86edd31a9bf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesmysqltaskinput.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesMySqlTaskInput struct { + ConnectionInfo MySqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesmysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesmysqltaskoutput.go new file mode 100644 index 00000000000..2b4e464533a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesmysqltaskoutput.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesMySqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesmysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesmysqltaskproperties.go new file mode 100644 index 00000000000..3240e9fe8aa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesmysqltaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesMySqlTaskProperties{} + +type GetUserTablesMySqlTaskProperties struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesMySqlTaskProperties{} + +func (s GetUserTablesMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesMySqlTaskProperties{} + +func (s *GetUserTablesMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State 
*TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesoracletaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesoracletaskinput.go new file mode 100644 index 00000000000..9d47b1a3ffb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesoracletaskinput.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesOracleTaskInput struct { + ConnectionInfo OracleConnectionInfo `json:"connectionInfo"` + SelectedSchemas []string `json:"selectedSchemas"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesoracletaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesoracletaskoutput.go new file mode 100644 index 00000000000..ddd47a242bc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesoracletaskoutput.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesOracleTaskOutput struct { + SchemaName *string `json:"schemaName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesoracletaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesoracletaskproperties.go new file mode 100644 index 00000000000..b0dd383d5eb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablesoracletaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesOracleTaskProperties{} + +type GetUserTablesOracleTaskProperties struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesOracleTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesOracleTaskProperties{} + +func (s GetUserTablesOracleTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesOracleTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesOracle" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesOracleTaskProperties{} + +func (s *GetUserTablesOracleTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError 
`json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesOracleTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablespostgresqltaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablespostgresqltaskinput.go new file mode 100644 index 00000000000..bc5889f2e5e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablespostgresqltaskinput.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesPostgreSqlTaskInput struct { + ConnectionInfo PostgreSqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablespostgresqltaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablespostgresqltaskoutput.go new file mode 100644 index 00000000000..e8dd5c1e4b1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablespostgresqltaskoutput.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesPostgreSqlTaskOutput struct { + DatabaseName *string `json:"databaseName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablespostgresqltaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablespostgresqltaskproperties.go new file mode 100644 index 00000000000..2dd62fbfdf1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablespostgresqltaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesPostgreSqlTaskProperties{} + +type GetUserTablesPostgreSqlTaskProperties struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesPostgreSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesPostgreSqlTaskProperties{} + +func (s GetUserTablesPostgreSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesPostgreSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesPostgreSql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesPostgreSqlTaskProperties{} + +func (s *GetUserTablesPostgreSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string 
`json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesPostgreSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqlsynctaskinput.go new file mode 100644 index 00000000000..29daebca847 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqlsynctaskinput.go @@ -0,0 +1,11 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlSyncTaskInput struct { + SelectedSourceDatabases []string `json:"selectedSourceDatabases"` + SelectedTargetDatabases []string `json:"selectedTargetDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqlsynctaskoutput.go new file mode 100644 index 00000000000..80235a52ae5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqlsynctaskoutput.go @@ -0,0 +1,11 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlSyncTaskOutput struct { + DatabasesToSourceTables *map[string][]DatabaseTable `json:"databasesToSourceTables,omitempty"` + DatabasesToTargetTables *map[string][]DatabaseTable `json:"databasesToTargetTables,omitempty"` + TableValidationErrors *map[string][]string `json:"tableValidationErrors,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqlsynctaskproperties.go new file mode 100644 index 00000000000..611449371ae --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlSyncTaskProperties{} + +type GetUserTablesSqlSyncTaskProperties struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlSyncTaskProperties{} + +func (s GetUserTablesSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlSyncTaskProperties{} + +func (s *GetUserTablesSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqltaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqltaskinput.go new file mode 100644 index 00000000000..23e87601cf2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqltaskinput.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlTaskInput struct { + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqltaskoutput.go new file mode 100644 index 00000000000..f508a8733ba --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqltaskoutput.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqltaskproperties.go new file mode 100644 index 00000000000..8b217f4e76b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_getusertablessqltaskproperties.go @@ -0,0 +1,109 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlTaskProperties{} + +type GetUserTablesSqlTaskProperties struct { + Input *GetUserTablesSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlTaskProperties{} + +func (s GetUserTablesSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.Sql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlTaskProperties{} + +func (s *GetUserTablesSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + 
Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemisynccompletecommandinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemisynccompletecommandinput.go new file mode 100644 index 00000000000..7e6f858a463 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemisynccompletecommandinput.go @@ -0,0 +1,8 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMISyncCompleteCommandInput struct { + SourceDatabaseName string `json:"sourceDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemisynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemisynccompletecommandoutput.go new file mode 100644 index 00000000000..49655f65fe8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemisynccompletecommandoutput.go @@ -0,0 +1,8 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMISyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemisynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemisynccompletecommandproperties.go new file mode 100644 index 00000000000..96d5ec390d8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemisynccompletecommandproperties.go @@ -0,0 +1,55 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ CommandProperties = MigrateMISyncCompleteCommandProperties{} + +type MigrateMISyncCompleteCommandProperties struct { + Input *MigrateMISyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateMISyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateMISyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateMISyncCompleteCommandProperties{} + +func (s MigrateMISyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMISyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.SqlServer.AzureDbSqlMi.Complete" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemongodbtaskproperties.go new file mode 100644 index 00000000000..9fb872ace0c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemongodbtaskproperties.go @@ -0,0 +1,121 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMongoDbTaskProperties{} + +type MigrateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMongoDbTaskProperties{} + +func (s MigrateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMongoDbTaskProperties{} + +func (s *MigrateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State 
*TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MongoDbProgress, 0) + for i, val := range listTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinedatabaseinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinedatabaseinput.go new file mode 100644 index 00000000000..9eb906bb39b --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinedatabaseinput.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlOfflineDatabaseInput struct { + Name *string `json:"name,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskinput.go new file mode 100644 index 00000000000..cafbfad7244 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskinput.go @@ -0,0 +1,32 @@ +package servicetaskresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + MakeSourceServerReadOnly *bool `json:"makeSourceServerReadOnly,omitempty"` + OptionalAgentSettings *map[string]string `json:"optionalAgentSettings,omitempty"` + SelectedDatabases []MigrateMySqlAzureDbForMySqlOfflineDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutput.go new file mode 100644 index 00000000000..bbb8ad7bb32 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutput.go @@ -0,0 +1,100 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskOutput interface { + MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlOfflineTaskOutput BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlOfflineTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlOfflineTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + 
if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + migrateMySqlAzureDbForMySqlOfflineTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go new file mode 100644 index 00000000000..a3bc37afbed --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + var decoded 
map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go new file mode 100644 index 00000000000..550715cc0be --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go @@ -0,0 +1,52 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go new file mode 100644 index 00000000000..26c14ef05c2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go @@ 
-0,0 +1,66 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +func (s 
MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go new file mode 100644 index 00000000000..7c7dc63d526 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go @@ -0,0 +1,61 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling 
MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskproperties.go new file mode 100644 index 00000000000..979d0285982 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlofflinetaskproperties.go @@ -0,0 +1,127 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlOfflineTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper 
MigrateMySqlAzureDbForMySqlOfflineTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlOfflineTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list 
[]json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlOfflineTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsyncdatabaseinput.go new file mode 100644 index 00000000000..8b9250cbf1e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsyncdatabaseinput.go @@ -0,0 +1,13 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlSyncDatabaseInput struct { + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskinput.go new file mode 100644 index 00000000000..3d1765f1793 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskinput.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlSyncTaskInput struct { + SelectedDatabases []MigrateMySqlAzureDbForMySqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutput.go new file mode 100644 index 00000000000..9eba31ddc8d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlSyncTaskOutput interface { + MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlSyncTaskOutput BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlSyncTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err 
!= nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + migrateMySqlAzureDbForMySqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..f41578d1e14 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..cad1bfab0a3 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = 
MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputerror.go new file mode 100644 index 00000000000..da3a87f264a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..1786397d0d4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ +package servicetaskresource + +import ( 
+ "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: 
%+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..bc4208f97f4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *string `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *string `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *string `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() 
BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskproperties.go new file mode 100644 index 00000000000..33fd87c64da --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratemysqlazuredbformysqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +type MigrateMySqlAzureDbForMySqlSyncTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput 
`json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..8be6cd02ba9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type MigrateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling 
MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", 
i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateOracleAzureDbPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..e3fe8c3e586 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateOracleAzureDbPostgreSqlSyncDatabaseInput struct { + CaseManipulation *string `json:"caseManipulation,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..fbf6fdb68a4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskinput.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateOracleAzureDbPostgreSqlSyncTaskInput struct { + SelectedDatabases []MigrateOracleAzureDbPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..0c69ff58afb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateOracleAzureDbPostgreSqlSyncTaskOutput interface { + MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +type BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +// RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + migrateOracleAzureDbPostgreSqlSyncTaskOutput BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s.migrateOracleAzureDbPostgreSqlSyncTaskOutput +} + +func UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(input []byte) (MigrateOracleAzureDbPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + migrateOracleAzureDbPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..3ebd841db57 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..fcaec093c0a 
--- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..f93ca6c8909 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..14aae51ab2d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go @@ 
-0,0 +1,57 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + 
return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..31c81adeb8b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + 
+func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..3acd3b9921f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go @@ -0,0 +1,14 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]interface{} `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SelectedTables *[]MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput `json:"selectedTables,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go new file mode 100644 index 00000000000..0b25735ed05 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go @@ -0,0 +1,8 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput struct { + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..0957da1d2aa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,30 @@ +package servicetaskresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..3e663aaf227 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput interface { + MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +type BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +// RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s.migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput +} + +func UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(input []byte) (MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if 
strings.EqualFold(value, "MigrationLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..55e4cf6618b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..1c09ed74b39 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) 
MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..d90dca28256 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go @@ -0,0 +1,53 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 
00000000000..9bd7f279c45 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel struct { + DatabaseCount *float64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerType *ScenarioSource `json:"sourceServerType,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *ReplicateMigrationState `json:"state,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerType *ScenarioTarget `json:"targetServerType,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel 
+ wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..1d05b89097c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..80e786f31f2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,130 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, 
nil +} + +var _ json.Unmarshaler = &MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list 
[]json.RawMessage: %+v", err) + } + + output := make([]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbdatabaseinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbdatabaseinput.go new file mode 100644 index 00000000000..595c0f60e1f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbdatabaseinput.go @@ -0,0 +1,13 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbDatabaseInput struct { + Id *string `json:"id,omitempty"` + MakeSourceDbReadOnly *bool `json:"makeSourceDbReadOnly,omitempty"` + Name *string `json:"name,omitempty"` + SchemaSetting *interface{} `json:"schemaSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsyncdatabaseinput.go new file mode 100644 index 00000000000..85364eee7fa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskinput.go new file mode 100644 index 00000000000..c5dfc96ca50 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskinput.go @@ -0,0 +1,11 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbSyncTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutput.go new file mode 100644 index 00000000000..4275d785c47 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutput.go @@ -0,0 +1,108 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncTaskOutput interface { + MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = RawMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + migrateSqlServerSqlDbSyncTaskOutput BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s.migrateSqlServerSqlDbSyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbSyncTaskOutputImpl{ + migrateSqlServerSqlDbSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..c03e8f9bf05 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..9d0fdf524ba --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go @@ 
-0,0 +1,66 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + wrapped := 
wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputerror.go new file mode 100644 index 00000000000..803a205b7c2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputError{} + +type MigrateSqlServerSqlDbSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..3a46e83f3df --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go @@ -0,0 +1,58 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..558b01bb91a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + 
ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..a7e7cf1e04d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbsynctaskproperties.go @@ -0,0 +1,121 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
var _ ProjectTaskProperties = MigrateSqlServerSqlDbSyncTaskProperties{}

// MigrateSqlServerSqlDbSyncTaskProperties is the ProjectTaskProperties
// implementation for the "Migrate.SqlServer.AzureSqlDb.Sync" task type.
type MigrateSqlServerSqlDbSyncTaskProperties struct {
	Input  *MigrateSqlServerSqlDbSyncTaskInput    `json:"input,omitempty"`
	Output *[]MigrateSqlServerSqlDbSyncTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns the shared base-model fields.
func (s MigrateSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskProperties{}

// MarshalJSON serializes the struct and forces the "taskType" discriminator to
// the fixed value for this task type.
func (s MigrateSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) {
	// The wrapper type has no MarshalJSON method, which prevents infinite
	// recursion when serializing the struct fields.
	type wrapper MigrateSqlServerSqlDbSyncTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err)
	}

	// Round-trip through a map so the discriminator can be injected.
	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err)
	}

	decoded["taskType"] = "Migrate.SqlServer.AzureSqlDb.Sync"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MigrateSqlServerSqlDbSyncTaskProperties{}

// UnmarshalJSON decodes the plain fields first, then performs a second pass
// over the raw payload to decode the two fields whose element types are
// discriminated interfaces (Commands and Output), which need per-element
// implementation dispatch.
func (s *MigrateSqlServerSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error {
	// First pass: only the fields that decode directly (Commands/Output are
	// intentionally absent here).
	var decoded struct {
		Input      *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"`
		ClientData *map[string]string                  `json:"clientData,omitempty"`
		Errors     *[]ODataError                       `json:"errors,omitempty"`
		State      *TaskState                          `json:"state,omitempty"`
		TaskType   TaskType                            `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: keep each field's raw JSON so the discriminated element
	// types can be resolved one element at a time.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]MigrateSqlServerSqlDbSyncTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}
// MigrateSqlServerSqlDbTaskInput is the input payload for a SQL Server ->
// Azure SQL Database (offline) migration task.
type MigrateSqlServerSqlDbTaskInput struct {
	// EncryptedKeyForSecureFields is the key used to protect secure fields.
	// NOTE(review): the encryption scheme is service-defined and not visible
	// here.
	EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"`
	// SelectedDatabases lists the databases to migrate. The field has no
	// `omitempty`, so it is always serialized (a nil slice encodes as null).
	SelectedDatabases []MigrateSqlServerSqlDbDatabaseInput `json:"selectedDatabases"`
	// SourceConnectionInfo holds connection details for the source SQL Server.
	SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"`
	// StartedOn is a timestamp string — format not visible here; presumably
	// RFC 3339 as elsewhere in Azure APIs.
	StartedOn *string `json:"startedOn,omitempty"`
	// TargetConnectionInfo holds connection details for the target database.
	TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"`
	// ValidationOptions optionally configures post-migration validation.
	ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"`
}
+ +type MigrateSqlServerSqlDbTaskOutput interface { + MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbTaskOutput = BaseMigrateSqlServerSqlDbTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbTaskOutput = RawMigrateSqlServerSqlDbTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlDbTaskOutputImpl struct { + migrateSqlServerSqlDbTaskOutput BaseMigrateSqlServerSqlDbTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s.migrateSqlServerSqlDbTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "MigrationDatabaseLevelValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbTaskOutputImpl{ + migrateSqlServerSqlDbTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputdatabaselevel.go new file mode 100644 index 00000000000..c7d111e96a6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputdatabaselevel.go @@ -0,0 +1,65 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +func (s 
MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go new file mode 100644 index 00000000000..0768790b7b5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go @@ -0,0 +1,60 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult struct { + DataIntegrityValidationResult *DataIntegrityValidationResult `json:"dataIntegrityValidationResult,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + QueryAnalysisValidationResult *QueryAnalysisValidationResult `json:"queryAnalysisValidationResult,omitempty"` + SchemaValidationResult *SchemaComparisonValidationResult `json:"schemaValidationResult,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationDatabaseLevelValidationOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputerror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputerror.go new file mode 100644 index 00000000000..b8d3eaab164 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputerror.go @@ -0,0 +1,52 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputError{} + +type MigrateSqlServerSqlDbTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputError) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputError{} + +func (s MigrateSqlServerSqlDbTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + 
+ encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputmigrationlevel.go new file mode 100644 index 00000000000..14c97bdfe5c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + MigrationValidationResult *MigrationValidationResult `json:"migrationValidationResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion 
*string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputtablelevel.go new file mode 100644 index 00000000000..e3bd3eb54a1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputtablelevel.go @@ -0,0 +1,60 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputvalidationresult.go 
b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputvalidationresult.go new file mode 100644 index 00000000000..f60d22f8ad6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskoutputvalidationresult.go @@ -0,0 +1,54 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputValidationResult struct { + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationValidationOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskproperties.go new file mode 100644 index 00000000000..e560945335d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqldbtaskproperties.go @@ -0,0 +1,130 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlDbTaskProperties{} + +type MigrateSqlServerSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlDbTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskProperties{} + +func (s MigrateSqlServerSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskProperties + 
wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbTaskProperties{} + +func (s *MigrateSqlServerSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + 
for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmidatabaseinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmidatabaseinput.go new file mode 100644 index 00000000000..ef7efa8f115 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmidatabaseinput.go @@ -0,0 +1,12 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMIDatabaseInput struct { + BackupFilePaths *[]string `json:"backupFilePaths,omitempty"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + Id *string `json:"id,omitempty"` + Name string `json:"name"` + RestoreDatabaseName string `json:"restoreDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..eb289f6a131 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskinput.go @@ -0,0 +1,14 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + NumberOfParallelDatabaseMigrations *float64 `json:"numberOfParallelDatabaseMigrations,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..725897aa0a3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskoutput.go @@ -0,0 +1,92 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskOutput interface { + MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = BaseMigrateSqlServerSqlMISyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlMISyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = RawMigrateSqlServerSqlMISyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlMISyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlMISyncTaskOutputImpl struct { + migrateSqlServerSqlMISyncTaskOutput BaseMigrateSqlServerSqlMISyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s.migrateSqlServerSqlMISyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlMISyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMISyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMISyncTaskOutputImpl: %+v", err) + } + + return 
RawMigrateSqlServerSqlMISyncTaskOutputImpl{ + migrateSqlServerSqlMISyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..86513905b27 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go @@ -0,0 +1,62 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel struct { + ActiveBackupSets *[]BackupSetInfo `json:"activeBackupSets,omitempty"` + ContainerName *string `json:"containerName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FullBackupSetInfo *BackupSetInfo `json:"fullBackupSetInfo,omitempty"` + IsFullBackupRestored *bool `json:"isFullBackupRestored,omitempty"` + LastRestoredBackupSetInfo *BackupSetInfo `json:"lastRestoredBackupSetInfo,omitempty"` + MigrationState *DatabaseMigrationState `json:"migrationState,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MigrateSqlServerSqlMISyncTaskOutput() 
BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskoutputerror.go new file mode 100644 index 00000000000..56bb229f63d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskoutputerror.go @@ -0,0 +1,52 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputError{} + +type MigrateSqlServerSqlMISyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputError{} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..c57f36b4659 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go @@ -0,0 +1,62 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + DatabaseErrorCount *int64 `json:"databaseErrorCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerName *string `json:"sourceServerName,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerName *string `json:"targetServerName,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = 
"MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..f5b1bfd3671 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmisynctaskproperties.go @@ -0,0 +1,124 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlMISyncTaskProperties{} + +type MigrateSqlServerSqlMISyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskProperties{} + +func (s MigrateSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskProperties + 
wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMISyncTaskProperties{} + +func (s *MigrateSqlServerSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + 
return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMISyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmitaskinput.go new file mode 100644 index 00000000000..647837822ca --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmitaskinput.go @@ -0,0 +1,18 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMITaskInput struct { + AadDomainName *string `json:"aadDomainName,omitempty"` + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedAgentJobs *[]string `json:"selectedAgentJobs,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..b4421244bb1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmitaskoutput.go @@ -0,0 +1,108 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// MigrateSqlServerSqlMITaskOutput is the discriminated-union base type for
// the per-level outputs of a SQL Server -> SQL MI migration task. The
// concrete variant is selected by the "resultType" JSON discriminator via
// UnmarshalMigrateSqlServerSqlMITaskOutputImplementation.
type MigrateSqlServerSqlMITaskOutput interface {
	MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl
}

var _ MigrateSqlServerSqlMITaskOutput = BaseMigrateSqlServerSqlMITaskOutputImpl{}

// BaseMigrateSqlServerSqlMITaskOutputImpl holds the fields shared by every
// MigrateSqlServerSqlMITaskOutput variant.
type BaseMigrateSqlServerSqlMITaskOutputImpl struct {
	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

func (s BaseMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl {
	return s
}

var _ MigrateSqlServerSqlMITaskOutput = RawMigrateSqlServerSqlMITaskOutputImpl{}

// RawMigrateSqlServerSqlMITaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawMigrateSqlServerSqlMITaskOutputImpl struct {
	migrateSqlServerSqlMITaskOutput BaseMigrateSqlServerSqlMITaskOutputImpl
	Type                            string
	Values                          map[string]interface{}
}

func (s RawMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl {
	return s.migrateSqlServerSqlMITaskOutput
}

// UnmarshalMigrateSqlServerSqlMITaskOutputImplementation decodes input into
// the concrete MigrateSqlServerSqlMITaskOutput variant named by its
// "resultType" field (matched case-insensitively). A nil input yields a nil
// result; an unrecognised discriminator falls back to
// RawMigrateSqlServerSqlMITaskOutputImpl, retaining the raw payload map.
func UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(input []byte) (MigrateSqlServerSqlMITaskOutput, error) {
	if input == nil {
		return nil, nil
	}

	// First pass: decode into a generic map just to read the discriminator.
	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutput into map[string]interface: %+v", err)
	}

	var value string
	if v, ok := temp["resultType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "AgentJobLevelOutput") {
		var out MigrateSqlServerSqlMITaskOutputAgentJobLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "DatabaseLevelOutput") {
		var out MigrateSqlServerSqlMITaskOutputDatabaseLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ErrorOutput") {
		var out MigrateSqlServerSqlMITaskOutputError
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputError: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "LoginLevelOutput") {
		var out MigrateSqlServerSqlMITaskOutputLoginLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "MigrationLevelOutput") {
		var out MigrateSqlServerSqlMITaskOutputMigrationLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err)
		}
		return out, nil
	}

	// No discriminator matched: keep the shared base fields plus the raw map.
	var parent BaseMigrateSqlServerSqlMITaskOutputImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMITaskOutputImpl: %+v", err)
	}

	return RawMigrateSqlServerSqlMITaskOutputImpl{
		migrateSqlServerSqlMITaskOutput: parent,
		Type:                            value,
		Values:                          temp,
	}, nil

}
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +type MigrateSqlServerSqlMITaskOutputAgentJobLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + Message *string `json:"message,omitempty"` + Name *string `json:"name,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git 
var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputDatabaseLevel{}

// MigrateSqlServerSqlMITaskOutputDatabaseLevel is the "DatabaseLevelOutput"
// variant of MigrateSqlServerSqlMITaskOutput, describing the progress of a
// single database migration.
type MigrateSqlServerSqlMITaskOutputDatabaseLevel struct {
	DatabaseName          *string                 `json:"databaseName,omitempty"`
	EndedOn               *string                 `json:"endedOn,omitempty"`
	ExceptionsAndWarnings *[]ReportableException  `json:"exceptionsAndWarnings,omitempty"`
	Message               *string                 `json:"message,omitempty"`
	SizeMB                *float64                `json:"sizeMB,omitempty"`
	Stage                 *DatabaseMigrationStage `json:"stage,omitempty"`
	StartedOn             *string                 `json:"startedOn,omitempty"`
	State                 *MigrationState         `json:"state,omitempty"`

	// Fields inherited from MigrateSqlServerSqlMITaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlMITaskOutput returns just the shared base-type fields.
func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl {
	return BaseMigrateSqlServerSqlMITaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputDatabaseLevel{}

// MarshalJSON serialises the struct and forces the resultType discriminator
// to "DatabaseLevelOutput" so the payload round-trips correctly.
func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) {
	// wrapper sheds this type's MarshalJSON method, avoiding infinite recursion.
	type wrapper MigrateSqlServerSqlMITaskOutputDatabaseLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err)
	}

	decoded["resultType"] = "DatabaseLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err)
	}

	return encoded, nil
}
+ +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputError{} + +type MigrateSqlServerSqlMITaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputError) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputError{} + +func (s MigrateSqlServerSqlMITaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmitaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmitaskoutputloginlevel.go new file mode 100644 index 00000000000..b6471c8af3f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesqlserversqlmitaskoutputloginlevel.go @@ -0,0 +1,58 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputLoginLevel{}

// MigrateSqlServerSqlMITaskOutputLoginLevel is the "LoginLevelOutput"
// variant of MigrateSqlServerSqlMITaskOutput, describing the progress of a
// single login migration.
type MigrateSqlServerSqlMITaskOutputLoginLevel struct {
	EndedOn               *string                `json:"endedOn,omitempty"`
	ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"`
	LoginName             *string                `json:"loginName,omitempty"`
	Message               *string                `json:"message,omitempty"`
	Stage                 *LoginMigrationStage   `json:"stage,omitempty"`
	StartedOn             *string                `json:"startedOn,omitempty"`
	State                 *MigrationState        `json:"state,omitempty"`

	// Fields inherited from MigrateSqlServerSqlMITaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlMITaskOutput returns just the shared base-type fields.
func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl {
	return BaseMigrateSqlServerSqlMITaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputLoginLevel{}

// MarshalJSON serialises the struct and forces the resultType discriminator
// to "LoginLevelOutput" so the payload round-trips correctly.
func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MarshalJSON() ([]byte, error) {
	// wrapper sheds this type's MarshalJSON method, avoiding infinite recursion.
	type wrapper MigrateSqlServerSqlMITaskOutputLoginLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err)
	}

	decoded["resultType"] = "LoginLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err)
	}

	return encoded, nil
}
var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputMigrationLevel{}

// MigrateSqlServerSqlMITaskOutputMigrationLevel is the "MigrationLevelOutput"
// variant of MigrateSqlServerSqlMITaskOutput, summarising the whole migration:
// per-object status maps (agent jobs, databases, logins keyed by name),
// server versions, and orphaned-user / server-role results.
type MigrateSqlServerSqlMITaskOutputMigrationLevel struct {
	AgentJobs                *map[string]string                               `json:"agentJobs,omitempty"`
	Databases                *map[string]string                               `json:"databases,omitempty"`
	EndedOn                  *string                                          `json:"endedOn,omitempty"`
	ExceptionsAndWarnings    *[]ReportableException                           `json:"exceptionsAndWarnings,omitempty"`
	Logins                   *map[string]string                               `json:"logins,omitempty"`
	Message                  *string                                          `json:"message,omitempty"`
	OrphanedUsersInfo        *[]OrphanedUserInfo                              `json:"orphanedUsersInfo,omitempty"`
	ServerRoleResults        *map[string]StartMigrationScenarioServerRoleResult `json:"serverRoleResults,omitempty"`
	SourceServerBrandVersion *string                                          `json:"sourceServerBrandVersion,omitempty"`
	SourceServerVersion      *string                                          `json:"sourceServerVersion,omitempty"`
	StartedOn                *string                                          `json:"startedOn,omitempty"`
	State                    *MigrationState                                  `json:"state,omitempty"`
	Status                   *MigrationStatus                                 `json:"status,omitempty"`
	TargetServerBrandVersion *string                                          `json:"targetServerBrandVersion,omitempty"`
	TargetServerVersion      *string                                          `json:"targetServerVersion,omitempty"`

	// Fields inherited from MigrateSqlServerSqlMITaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSqlServerSqlMITaskOutput returns just the shared base-type fields.
func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl {
	return BaseMigrateSqlServerSqlMITaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputMigrationLevel{}

// MarshalJSON serialises the struct and forces the resultType discriminator
// to "MigrationLevelOutput" so the payload round-trips correctly.
func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MarshalJSON() ([]byte, error) {
	// wrapper sheds this type's MarshalJSON method, avoiding infinite recursion.
	type wrapper MigrateSqlServerSqlMITaskOutputMigrationLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err)
	}

	decoded["resultType"] = "MigrationLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err)
	}

	return encoded, nil
}
var _ ProjectTaskProperties = MigrateSqlServerSqlMITaskProperties{}

// MigrateSqlServerSqlMITaskProperties is the "Migrate.SqlServer.AzureSqlDbMI"
// variant of ProjectTaskProperties. Commands and Output hold discriminated
// types, so this type implements custom JSON (un)marshalling below.
type MigrateSqlServerSqlMITaskProperties struct {
	CreatedOn    *string                            `json:"createdOn,omitempty"`
	Input        *MigrateSqlServerSqlMITaskInput    `json:"input,omitempty"`
	IsCloneable  *bool                              `json:"isCloneable,omitempty"`
	Output       *[]MigrateSqlServerSqlMITaskOutput `json:"output,omitempty"`
	ParentTaskId *string                            `json:"parentTaskId,omitempty"`
	TaskId       *string                            `json:"taskId,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string    `json:"clientData,omitempty"`
	Commands   *[]CommandProperties  `json:"commands,omitempty"`
	Errors     *[]ODataError         `json:"errors,omitempty"`
	State      *TaskState            `json:"state,omitempty"`
	TaskType   TaskType              `json:"taskType"`
}

// ProjectTaskProperties returns just the shared base-type fields.
func (s MigrateSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = MigrateSqlServerSqlMITaskProperties{}

// MarshalJSON serialises the struct and forces the taskType discriminator to
// "Migrate.SqlServer.AzureSqlDbMI" so the payload round-trips correctly.
func (s MigrateSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) {
	// wrapper sheds this type's MarshalJSON method, avoiding infinite recursion.
	type wrapper MigrateSqlServerSqlMITaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties: %+v", err)
	}

	decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MigrateSqlServerSqlMITaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then re-scans the raw
// payload to unmarshal the "commands" and "output" lists element-by-element
// through their discriminated-union helpers (their interface types cannot be
// decoded by encoding/json directly).
func (s *MigrateSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		CreatedOn    *string                         `json:"createdOn,omitempty"`
		Input        *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"`
		IsCloneable  *bool                           `json:"isCloneable,omitempty"`
		ParentTaskId *string                         `json:"parentTaskId,omitempty"`
		TaskId       *string                         `json:"taskId,omitempty"`
		ClientData   *map[string]string              `json:"clientData,omitempty"`
		Errors       *[]ODataError                   `json:"errors,omitempty"`
		State        *TaskState                      `json:"state,omitempty"`
		TaskType     TaskType                        `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.CreatedOn = decoded.CreatedOn
	s.Input = decoded.Input
	s.IsCloneable = decoded.IsCloneable
	s.ParentTaskId = decoded.ParentTaskId
	s.TaskId = decoded.TaskId
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: keep the polymorphic lists as raw JSON for per-element dispatch.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]MigrateSqlServerSqlMITaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}

// MigrateSsisTaskInput is the input payload for an SSIS migration task.
// All three fields are always serialized (no omitempty) and are required.
type MigrateSsisTaskInput struct {
	SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"`
	SsisMigrationInfo    SsisMigrationInfo `json:"ssisMigrationInfo"`
	TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"`
}
// MigrateSsisTaskOutput is the discriminated-union base type for the outputs
// of an SSIS migration task. The concrete variant is selected by the
// "resultType" JSON discriminator via
// UnmarshalMigrateSsisTaskOutputImplementation.
type MigrateSsisTaskOutput interface {
	MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl
}

var _ MigrateSsisTaskOutput = BaseMigrateSsisTaskOutputImpl{}

// BaseMigrateSsisTaskOutputImpl holds the fields shared by every
// MigrateSsisTaskOutput variant.
type BaseMigrateSsisTaskOutputImpl struct {
	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

func (s BaseMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl {
	return s
}

var _ MigrateSsisTaskOutput = RawMigrateSsisTaskOutputImpl{}

// RawMigrateSsisTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawMigrateSsisTaskOutputImpl struct {
	migrateSsisTaskOutput BaseMigrateSsisTaskOutputImpl
	Type                  string
	Values                map[string]interface{}
}

func (s RawMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl {
	return s.migrateSsisTaskOutput
}

// UnmarshalMigrateSsisTaskOutputImplementation decodes input into the
// concrete MigrateSsisTaskOutput variant named by its "resultType" field
// (matched case-insensitively). A nil input yields a nil result; an
// unrecognised discriminator falls back to RawMigrateSsisTaskOutputImpl,
// retaining the raw payload map.
func UnmarshalMigrateSsisTaskOutputImplementation(input []byte) (MigrateSsisTaskOutput, error) {
	if input == nil {
		return nil, nil
	}

	// First pass: decode into a generic map just to read the discriminator.
	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutput into map[string]interface: %+v", err)
	}

	var value string
	if v, ok := temp["resultType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "MigrationLevelOutput") {
		var out MigrateSsisTaskOutputMigrationLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputMigrationLevel: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "SsisProjectLevelOutput") {
		var out MigrateSsisTaskOutputProjectLevel
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputProjectLevel: %+v", err)
		}
		return out, nil
	}

	// No discriminator matched: keep the shared base fields plus the raw map.
	var parent BaseMigrateSsisTaskOutputImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseMigrateSsisTaskOutputImpl: %+v", err)
	}

	return RawMigrateSsisTaskOutputImpl{
		migrateSsisTaskOutput: parent,
		Type:                  value,
		Values:                temp,
	}, nil

}
+ +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputMigrationLevel{} + +type MigrateSsisTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputMigrationLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputMigrationLevel{} + +func (s MigrateSsisTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratessistaskoutputprojectlevel.go 
var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputProjectLevel{}

// MigrateSsisTaskOutputProjectLevel is the "SsisProjectLevelOutput" variant
// of MigrateSsisTaskOutput, describing the migration of one SSIS project.
type MigrateSsisTaskOutputProjectLevel struct {
	EndedOn               *string                `json:"endedOn,omitempty"`
	ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"`
	FolderName            *string                `json:"folderName,omitempty"`
	Message               *string                `json:"message,omitempty"`
	ProjectName           *string                `json:"projectName,omitempty"`
	Stage                 *SsisMigrationStage    `json:"stage,omitempty"`
	StartedOn             *string                `json:"startedOn,omitempty"`
	State                 *MigrationState        `json:"state,omitempty"`

	// Fields inherited from MigrateSsisTaskOutput

	Id         *string `json:"id,omitempty"`
	ResultType string  `json:"resultType"`
}

// MigrateSsisTaskOutput returns just the shared base-type fields.
func (s MigrateSsisTaskOutputProjectLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl {
	return BaseMigrateSsisTaskOutputImpl{
		Id:         s.Id,
		ResultType: s.ResultType,
	}
}

var _ json.Marshaler = MigrateSsisTaskOutputProjectLevel{}

// MarshalJSON serialises the struct and forces the resultType discriminator
// to "SsisProjectLevelOutput" so the payload round-trips correctly.
func (s MigrateSsisTaskOutputProjectLevel) MarshalJSON() ([]byte, error) {
	// wrapper sheds this type's MarshalJSON method, avoiding infinite recursion.
	type wrapper MigrateSsisTaskOutputProjectLevel
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputProjectLevel: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputProjectLevel: %+v", err)
	}

	decoded["resultType"] = "SsisProjectLevelOutput"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputProjectLevel: %+v", err)
	}

	return encoded, nil
}

var _ ProjectTaskProperties = MigrateSsisTaskProperties{}

// MigrateSsisTaskProperties is the "Migrate.Ssis" variant of
// ProjectTaskProperties. Commands and Output hold discriminated types, so
// this type implements custom JSON (un)marshalling below.
type MigrateSsisTaskProperties struct {
	Input  *MigrateSsisTaskInput    `json:"input,omitempty"`
	Output *[]MigrateSsisTaskOutput `json:"output,omitempty"`

	// Fields inherited from ProjectTaskProperties

	ClientData *map[string]string   `json:"clientData,omitempty"`
	Commands   *[]CommandProperties `json:"commands,omitempty"`
	Errors     *[]ODataError        `json:"errors,omitempty"`
	State      *TaskState           `json:"state,omitempty"`
	TaskType   TaskType             `json:"taskType"`
}

// ProjectTaskProperties returns just the shared base-type fields.
func (s MigrateSsisTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return BaseProjectTaskPropertiesImpl{
		ClientData: s.ClientData,
		Commands:   s.Commands,
		Errors:     s.Errors,
		State:      s.State,
		TaskType:   s.TaskType,
	}
}

var _ json.Marshaler = MigrateSsisTaskProperties{}

// MarshalJSON serialises the struct and forces the taskType discriminator to
// "Migrate.Ssis" so the payload round-trips correctly.
func (s MigrateSsisTaskProperties) MarshalJSON() ([]byte, error) {
	// wrapper sheds this type's MarshalJSON method, avoiding infinite recursion.
	type wrapper MigrateSsisTaskProperties
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling MigrateSsisTaskProperties: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling MigrateSsisTaskProperties: %+v", err)
	}

	decoded["taskType"] = "Migrate.Ssis"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling MigrateSsisTaskProperties: %+v", err)
	}

	return encoded, nil
}

var _ json.Unmarshaler = &MigrateSsisTaskProperties{}

// UnmarshalJSON decodes the plain fields directly, then re-scans the raw
// payload to unmarshal the "commands" and "output" lists element-by-element
// through their discriminated-union helpers (their interface types cannot be
// decoded by encoding/json directly).
func (s *MigrateSsisTaskProperties) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		Input      *MigrateSsisTaskInput `json:"input,omitempty"`
		ClientData *map[string]string    `json:"clientData,omitempty"`
		Errors     *[]ODataError         `json:"errors,omitempty"`
		State      *TaskState            `json:"state,omitempty"`
		TaskType   TaskType              `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Input = decoded.Input
	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	// Second pass: keep the polymorphic lists as raw JSON for per-element dispatch.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling MigrateSsisTaskProperties into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSsisTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	if v, ok := temp["output"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
		}

		output := make([]MigrateSsisTaskOutput, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalMigrateSsisTaskOutputImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSsisTaskProperties': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Output = &output
	}

	return nil
}
+ +type MigrateSyncCompleteCommandInput struct { + CommitTimeStamp *string `json:"commitTimeStamp,omitempty"` + DatabaseName string `json:"databaseName"` +} + +func (o *MigrateSyncCompleteCommandInput) GetCommitTimeStampAsTime() (*time.Time, error) { + if o.CommitTimeStamp == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CommitTimeStamp, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateSyncCompleteCommandInput) SetCommitTimeStampAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CommitTimeStamp = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesynccompletecommandoutput.go new file mode 100644 index 00000000000..2d6e210ab3e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesynccompletecommandoutput.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` + Id *string `json:"id,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesynccompletecommandproperties.go new file mode 100644 index 00000000000..afd0b39dfea --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migratesynccompletecommandproperties.go @@ -0,0 +1,56 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ CommandProperties = MigrateSyncCompleteCommandProperties{} + +type MigrateSyncCompleteCommandProperties struct { + CommandId *string `json:"commandId,omitempty"` + Input *MigrateSyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateSyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateSyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateSyncCompleteCommandProperties{} + +func (s MigrateSyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.Sync.Complete.Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationeligibilityinfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationeligibilityinfo.go new file mode 100644 index 00000000000..35425ac733a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationeligibilityinfo.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationEligibilityInfo struct { + IsEligibleForMigration *bool `json:"isEligibleForMigration,omitempty"` + ValidationMessages *[]string `json:"validationMessages,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationreportresult.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationreportresult.go new file mode 100644 index 00000000000..18b12ecef3b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationreportresult.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationReportResult struct { + Id *string `json:"id,omitempty"` + ReportURL *string `json:"reportUrl,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationvalidationdatabasesummaryresult.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationvalidationdatabasesummaryresult.go new file mode 100644 index 00000000000..02a9ff2fb55 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationvalidationdatabasesummaryresult.go @@ -0,0 +1,44 @@ +package servicetaskresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationValidationDatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} + +func (o *MigrationValidationDatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *MigrationValidationDatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationvalidationoptions.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationvalidationoptions.go new file mode 100644 index 00000000000..a096b3846e2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationvalidationoptions.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationValidationOptions struct { + EnableDataIntegrityValidation *bool `json:"enableDataIntegrityValidation,omitempty"` + EnableQueryAnalysisValidation *bool `json:"enableQueryAnalysisValidation,omitempty"` + EnableSchemaValidation *bool `json:"enableSchemaValidation,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationvalidationresult.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationvalidationresult.go new file mode 100644 index 00000000000..5b31637b2da --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_migrationvalidationresult.go @@ -0,0 +1,11 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationValidationResult struct { + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_misqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_misqlconnectioninfo.go new file mode 100644 index 00000000000..f08403d40dd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_misqlconnectioninfo.go @@ -0,0 +1,54 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = MiSqlConnectionInfo{} + +type MiSqlConnectionInfo struct { + ManagedInstanceResourceId string `json:"managedInstanceResourceId"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MiSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MiSqlConnectionInfo{} + +func (s MiSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MiSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MiSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MiSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MiSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MiSqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbclusterinfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbclusterinfo.go new file mode 100644 index 00000000000..d7c8c8d23d0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbclusterinfo.go @@ -0,0 +1,11 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbClusterInfo struct { + Databases []MongoDbDatabaseInfo `json:"databases"` + SupportsSharding bool `json:"supportsSharding"` + Type MongoDbClusterType `json:"type"` + Version string `json:"version"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbcollectioninfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbcollectioninfo.go new file mode 100644 index 00000000000..a26a68e1ad7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbcollectioninfo.go @@ -0,0 +1,19 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbCollectionInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + DataSize int64 `json:"dataSize"` + DatabaseName string `json:"databaseName"` + DocumentCount int64 `json:"documentCount"` + IsCapped bool `json:"isCapped"` + IsSystemCollection bool `json:"isSystemCollection"` + IsView bool `json:"isView"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + ShardKey *MongoDbShardKeyInfo `json:"shardKey,omitempty"` + SupportsSharding bool `json:"supportsSharding"` + ViewOf *string `json:"viewOf,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbcollectionprogress.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbcollectionprogress.go new file mode 100644 index 00000000000..5cbbddf6ecc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbcollectionprogress.go @@ -0,0 +1,102 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ MongoDbProgress = MongoDbCollectionProgress{} + +type MongoDbCollectionProgress struct { + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbCollectionProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbCollectionProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbCollectionProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbCollectionProgress{} + +func (s MongoDbCollectionProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbCollectionProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbCollectionProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbCollectionProgress: %+v", err) + } + + decoded["resultType"] = "Collection" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbCollectionProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbcollectionsettings.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbcollectionsettings.go new file mode 100644 index 00000000000..7cbe6ca6422 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbcollectionsettings.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionSettings struct { + CanDelete *bool `json:"canDelete,omitempty"` + ShardKey *MongoDbShardKeySetting `json:"shardKey,omitempty"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbconnectioninfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbconnectioninfo.go new file mode 100644 index 00000000000..1e1be2df7fa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbconnectioninfo.go @@ -0,0 +1,64 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MongoDbConnectionInfo{} + +type MongoDbConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + ConnectionString string `json:"connectionString"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + EnforceSSL *bool `json:"enforceSSL,omitempty"` + Port *int64 `json:"port,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MongoDbConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MongoDbConnectionInfo{} + +func (s MongoDbConnectionInfo) MarshalJSON() 
([]byte, error) { + type wrapper MongoDbConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbConnectionInfo: %+v", err) + } + + decoded["type"] = "MongoDbConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbdatabaseinfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbdatabaseinfo.go new file mode 100644 index 00000000000..339609743c6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbdatabaseinfo.go @@ -0,0 +1,14 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbDatabaseInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + Collections []MongoDbCollectionInfo `json:"collections"` + DataSize int64 `json:"dataSize"` + DocumentCount int64 `json:"documentCount"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + SupportsSharding bool `json:"supportsSharding"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbdatabaseprogress.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbdatabaseprogress.go new file mode 100644 index 00000000000..2b46fc54c3a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbdatabaseprogress.go @@ -0,0 +1,166 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbDatabaseProgress{} + +type MongoDbDatabaseProgress struct { + Collections *map[string]MongoDbProgress `json:"collections,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbDatabaseProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbDatabaseProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbDatabaseProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbDatabaseProgress{} + +func (s MongoDbDatabaseProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbDatabaseProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbDatabaseProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbDatabaseProgress: %+v", err) + } + + decoded["resultType"] = "Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbDatabaseProgress: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MongoDbDatabaseProgress{} + +func (s *MongoDbDatabaseProgress) UnmarshalJSON(bytes []byte) error { + var decoded struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.BytesCopied = decoded.BytesCopied + s.DocumentsCopied = decoded.DocumentsCopied + s.ElapsedTime = decoded.ElapsedTime + s.Errors = decoded.Errors + s.EventsPending = 
decoded.EventsPending + s.EventsReplayed = decoded.EventsReplayed + s.LastEventTime = decoded.LastEventTime + s.LastReplayTime = decoded.LastReplayTime + s.Name = decoded.Name + s.QualifiedName = decoded.QualifiedName + s.ResultType = decoded.ResultType + s.State = decoded.State + s.TotalBytes = decoded.TotalBytes + s.TotalDocuments = decoded.TotalDocuments + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MongoDbDatabaseProgress into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["collections"]; ok { + var dictionaryTemp map[string]json.RawMessage + if err := json.Unmarshal(v, &dictionaryTemp); err != nil { + return fmt.Errorf("unmarshaling Collections into dictionary map[string]json.RawMessage: %+v", err) + } + + output := make(map[string]MongoDbProgress) + for key, val := range dictionaryTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling key %q field 'Collections' for 'MongoDbDatabaseProgress': %+v", key, err) + } + output[key] = impl + } + s.Collections = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbdatabasesettings.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbdatabasesettings.go new file mode 100644 index 00000000000..122f521b624 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbdatabasesettings.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbDatabaseSettings struct { + Collections map[string]MongoDbCollectionSettings `json:"collections"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodberror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodberror.go new file mode 100644 index 00000000000..be1ea6d925d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodberror.go @@ -0,0 +1,11 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbError struct { + Code *string `json:"code,omitempty"` + Count *int64 `json:"count,omitempty"` + Message *string `json:"message,omitempty"` + Type *MongoDbErrorType `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbmigrationprogress.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbmigrationprogress.go new file mode 100644 index 00000000000..fb238279e27 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbmigrationprogress.go @@ -0,0 +1,103 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbMigrationProgress{} + +type MongoDbMigrationProgress struct { + Databases *map[string]MongoDbDatabaseProgress `json:"databases,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbMigrationProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbMigrationProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbMigrationProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbMigrationProgress{} + +func (s MongoDbMigrationProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbMigrationProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbMigrationProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbMigrationProgress: %+v", err) + } + + decoded["resultType"] = "Migration" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbMigrationProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbmigrationsettings.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbmigrationsettings.go new file mode 100644 index 00000000000..e8aaab548c9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbmigrationsettings.go @@ -0,0 +1,13 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbMigrationSettings struct { + BoostRUs *int64 `json:"boostRUs,omitempty"` + Databases map[string]MongoDbDatabaseSettings `json:"databases"` + Replication *MongoDbReplication `json:"replication,omitempty"` + Source MongoDbConnectionInfo `json:"source"` + Target MongoDbConnectionInfo `json:"target"` + Throttling *MongoDbThrottlingSettings `json:"throttling,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbprogress.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbprogress.go new file mode 100644 index 00000000000..65146ac0ed7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbprogress.go @@ -0,0 +1,104 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbProgress interface { + MongoDbProgress() BaseMongoDbProgressImpl +} + +var _ MongoDbProgress = BaseMongoDbProgressImpl{} + +type BaseMongoDbProgressImpl struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s BaseMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s +} + +var _ MongoDbProgress = RawMongoDbProgressImpl{} + +// 
RawMongoDbProgressImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMongoDbProgressImpl struct { + mongoDbProgress BaseMongoDbProgressImpl + Type string + Values map[string]interface{} +} + +func (s RawMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s.mongoDbProgress +} + +func UnmarshalMongoDbProgressImplementation(input []byte) (MongoDbProgress, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbProgress into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Collection") { + var out MongoDbCollectionProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbCollectionProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Database") { + var out MongoDbDatabaseProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbDatabaseProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migration") { + var out MongoDbMigrationProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbMigrationProgress: %+v", err) + } + return out, nil + } + + var parent BaseMongoDbProgressImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMongoDbProgressImpl: %+v", err) + } + + return RawMongoDbProgressImpl{ + mongoDbProgress: parent, + Type: value, + Values: temp, + }, nil + +} diff --git 
a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbshardkeyfield.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbshardkeyfield.go new file mode 100644 index 00000000000..687fdf7c6cd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbshardkeyfield.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbShardKeyField struct { + Name string `json:"name"` + Order MongoDbShardKeyOrder `json:"order"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbshardkeyinfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbshardkeyinfo.go new file mode 100644 index 00000000000..1992c409b35 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbshardkeyinfo.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbShardKeyInfo struct { + Fields []MongoDbShardKeyField `json:"fields"` + IsUnique bool `json:"isUnique"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbshardkeysetting.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbshardkeysetting.go new file mode 100644 index 00000000000..b6a33214e0e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbshardkeysetting.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbShardKeySetting struct { + Fields []MongoDbShardKeyField `json:"fields"` + IsUnique *bool `json:"isUnique,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbthrottlingsettings.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbthrottlingsettings.go new file mode 100644 index 00000000000..042a361229b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mongodbthrottlingsettings.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbThrottlingSettings struct { + MaxParallelism *int64 `json:"maxParallelism,omitempty"` + MinFreeCPU *int64 `json:"minFreeCpu,omitempty"` + MinFreeMemoryMb *int64 `json:"minFreeMemoryMb,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mysqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mysqlconnectioninfo.go new file mode 100644 index 00000000000..0d3286e880f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_mysqlconnectioninfo.go @@ -0,0 +1,59 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = MySqlConnectionInfo{} + +type MySqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Port int64 `json:"port"` + ServerName string `json:"serverName"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MySqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MySqlConnectionInfo{} + +func (s MySqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MySqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MySqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MySqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MySqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MySqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_odataerror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_odataerror.go new file mode 100644 index 00000000000..4e0a6987974 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_odataerror.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ODataError struct { + Code *string `json:"code,omitempty"` + Details *[]ODataError `json:"details,omitempty"` + Message *string `json:"message,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_oracleconnectioninfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_oracleconnectioninfo.go new file mode 100644 index 00000000000..c60f675477f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_oracleconnectioninfo.go @@ -0,0 +1,58 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = OracleConnectionInfo{} + +type OracleConnectionInfo struct { + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource string `json:"dataSource"` + Port *int64 `json:"port,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s OracleConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = OracleConnectionInfo{} + +func (s OracleConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper OracleConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling OracleConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling OracleConnectionInfo: %+v", err) + } + + decoded["type"] = "OracleConnectionInfo" + + 
encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling OracleConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_orphaneduserinfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_orphaneduserinfo.go new file mode 100644 index 00000000000..99a245c20bf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_orphaneduserinfo.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type OrphanedUserInfo struct { + DatabaseName *string `json:"databaseName,omitempty"` + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_postgresqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_postgresqlconnectioninfo.go new file mode 100644 index 00000000000..c946d81ba73 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_postgresqlconnectioninfo.go @@ -0,0 +1,63 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = PostgreSqlConnectionInfo{} + +type PostgreSqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Port int64 `json:"port"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName string `json:"serverName"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s PostgreSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = PostgreSqlConnectionInfo{} + +func (s PostgreSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper PostgreSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling PostgreSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling PostgreSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "PostgreSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling PostgreSqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_projecttask.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_projecttask.go new file mode 100644 index 00000000000..be5daafc363 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_projecttask.go @@ -0,0 +1,56 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectTask struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties ProjectTaskProperties `json:"properties"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} + +var _ json.Unmarshaler = &ProjectTask{} + +func (s *ProjectTask) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Etag = decoded.Etag + s.Id = decoded.Id + s.Name = decoded.Name + s.SystemData = decoded.SystemData + s.Type = decoded.Type + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ProjectTask into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["properties"]; ok { + impl, err := UnmarshalProjectTaskPropertiesImplementation(v) + if err != nil { + return fmt.Errorf("unmarshaling field 'Properties' for 'ProjectTask': %+v", err) + } + s.Properties = impl + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_projecttaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_projecttaskproperties.go new file mode 100644 index 00000000000..cb0e8533fcf --- 
/dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_projecttaskproperties.go @@ -0,0 +1,386 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectTaskProperties interface { + ProjectTaskProperties() BaseProjectTaskPropertiesImpl +} + +var _ ProjectTaskProperties = BaseProjectTaskPropertiesImpl{} + +type BaseProjectTaskPropertiesImpl struct { + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s BaseProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return s +} + +var _ ProjectTaskProperties = RawProjectTaskPropertiesImpl{} + +// RawProjectTaskPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawProjectTaskPropertiesImpl struct { + projectTaskProperties BaseProjectTaskPropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return s.projectTaskProperties +} + +var _ json.Unmarshaler = &BaseProjectTaskPropertiesImpl{} + +func (s *BaseProjectTaskPropertiesImpl) UnmarshalJSON(bytes []byte) error { + var decoded struct { + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling BaseProjectTaskPropertiesImpl into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'BaseProjectTaskPropertiesImpl': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} + +func UnmarshalProjectTaskPropertiesImplementation(input []byte) (ProjectTaskProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ProjectTaskProperties into map[string]interface: %+v", err) + } + + var value 
string + if v, ok := temp["taskType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Connect.MongoDb") { + var out ConnectToMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.MySql") { + var out ConnectToSourceMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.Oracle.Sync") { + var out ConnectToSourceOracleSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.PostgreSql.Sync") { + var out ConnectToSourcePostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.SqlServer.Sync") { + var out ConnectToSourceSqlServerSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.SqlServer") { + var out ConnectToSourceSqlServerTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureDbForMySql") { + var out ConnectToTargetAzureDbForMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, 
fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureDbForPostgreSql.Sync") { + var out ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync") { + var out ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.SqlDb") { + var out ConnectToTargetSqlDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI.Sync.LRS") { + var out ConnectToTargetSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI") { + var out ConnectToTargetSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.SqlDb.Sync") { + var out ConnectToTargetSqlSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, 
"GetTDECertificates.Sql") { + var out GetTdeCertificatesSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetTdeCertificatesSqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesMySql") { + var out GetUserTablesMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesOracle") { + var out GetUserTablesOracleTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesOracleTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesPostgreSql") { + var out GetUserTablesPostgreSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTables.AzureSqlDb.Sync") { + var out GetUserTablesSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTables.Sql") { + var out GetUserTablesSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MongoDb") { + var out MigrateMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskProperties + 
if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql.Sync") { + var out MigrateMySqlAzureDbForMySqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Oracle.AzureDbForPostgreSql.Sync") { + var out MigrateOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDb.Sync") { + var out MigrateSqlServerSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.SqlDb") { + var out MigrateSqlServerSqlDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS") { + var out MigrateSqlServerSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI") { + var out MigrateSqlServerSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Ssis") { + var out MigrateSsisTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.SqlDb.Sync") { + var out ValidateMigrationInputSqlServerSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS") { + var out ValidateMigrationInputSqlServerSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI") { + var out ValidateMigrationInputSqlServerSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Validate.MongoDb") { + var out ValidateMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Validate.Oracle.AzureDbPostgreSql.Sync") { + var out 
ValidateOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + var parent BaseProjectTaskPropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseProjectTaskPropertiesImpl: %+v", err) + } + + return RawProjectTaskPropertiesImpl{ + projectTaskProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_queryanalysisvalidationresult.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_queryanalysisvalidationresult.go new file mode 100644 index 00000000000..6e764407a23 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_queryanalysisvalidationresult.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QueryAnalysisValidationResult struct { + QueryResults *QueryExecutionResult `json:"queryResults,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_queryexecutionresult.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_queryexecutionresult.go new file mode 100644 index 00000000000..859897a1897 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_queryexecutionresult.go @@ -0,0 +1,11 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type QueryExecutionResult struct { + QueryText *string `json:"queryText,omitempty"` + SourceResult *ExecutionStatistics `json:"sourceResult,omitempty"` + StatementsInBatch *int64 `json:"statementsInBatch,omitempty"` + TargetResult *ExecutionStatistics `json:"targetResult,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_reportableexception.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_reportableexception.go new file mode 100644 index 00000000000..93ab2f4f4e7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_reportableexception.go @@ -0,0 +1,13 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ReportableException struct { + ActionableMessage *string `json:"actionableMessage,omitempty"` + FilePath *string `json:"filePath,omitempty"` + HResult *int64 `json:"hResult,omitempty"` + LineNumber *string `json:"lineNumber,omitempty"` + Message *string `json:"message,omitempty"` + StackTrace *string `json:"stackTrace,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_schemacomparisonvalidationresult.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_schemacomparisonvalidationresult.go new file mode 100644 index 00000000000..b91b93ad030 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_schemacomparisonvalidationresult.go @@ -0,0 +1,11 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SchemaComparisonValidationResult struct { + SchemaDifferences *SchemaComparisonValidationResultType `json:"schemaDifferences,omitempty"` + SourceDatabaseObjectCount *map[string]int64 `json:"sourceDatabaseObjectCount,omitempty"` + TargetDatabaseObjectCount *map[string]int64 `json:"targetDatabaseObjectCount,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_schemacomparisonvalidationresulttype.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_schemacomparisonvalidationresulttype.go new file mode 100644 index 00000000000..5fd21304d9d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_schemacomparisonvalidationresulttype.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SchemaComparisonValidationResultType struct { + ObjectName *string `json:"objectName,omitempty"` + ObjectType *ObjectType `json:"objectType,omitempty"` + UpdateAction *UpdateActionType `json:"updateAction,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_selectedcertificateinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_selectedcertificateinput.go new file mode 100644 index 00000000000..fe624130637 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_selectedcertificateinput.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SelectedCertificateInput struct { + CertificateName string `json:"certificateName"` + Password string `json:"password"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_serverproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_serverproperties.go new file mode 100644 index 00000000000..936f3950d20 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_serverproperties.go @@ -0,0 +1,13 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServerProperties struct { + ServerDatabaseCount *int64 `json:"serverDatabaseCount,omitempty"` + ServerEdition *string `json:"serverEdition,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerOperatingSystemVersion *string `json:"serverOperatingSystemVersion,omitempty"` + ServerPlatform *string `json:"serverPlatform,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_sqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_sqlconnectioninfo.go new file mode 100644 index 00000000000..cfa1a22e190 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_sqlconnectioninfo.go @@ -0,0 +1,64 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = SqlConnectionInfo{} + +type SqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource string `json:"dataSource"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Platform *SqlSourcePlatform `json:"platform,omitempty"` + Port *int64 `json:"port,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s SqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = SqlConnectionInfo{} + +func (s SqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper SqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling SqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling SqlConnectionInfo: %+v", err) + } + + decoded["type"] = "SqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling SqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_sqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_sqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..b22df2ae839 --- 
/dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_sqlserversqlmisynctaskinput.go @@ -0,0 +1,13 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_ssismigrationinfo.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_ssismigrationinfo.go new file mode 100644 index 00000000000..bd11e23623e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_ssismigrationinfo.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SsisMigrationInfo struct { + EnvironmentOverwriteOption *SsisMigrationOverwriteOption `json:"environmentOverwriteOption,omitempty"` + ProjectOverwriteOption *SsisMigrationOverwriteOption `json:"projectOverwriteOption,omitempty"` + SsisStoreType *SsisStoreType `json:"ssisStoreType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_startmigrationscenarioserverroleresult.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_startmigrationscenarioserverroleresult.go new file mode 100644 index 00000000000..e50e45a78b5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_startmigrationscenarioserverroleresult.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type StartMigrationScenarioServerRoleResult struct { + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Name *string `json:"name,omitempty"` + State *MigrationState `json:"state,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_syncmigrationdatabaseerrorevent.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_syncmigrationdatabaseerrorevent.go new file mode 100644 index 00000000000..a2ef705c3d7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_syncmigrationdatabaseerrorevent.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SyncMigrationDatabaseErrorEvent struct { + EventText *string `json:"eventText,omitempty"` + EventTypeString *string `json:"eventTypeString,omitempty"` + TimestampString *string `json:"timestampString,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..77d79e64c85 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlDbSyncTaskProperties struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s 
ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := 
json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..c8bba6b65f0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMISyncTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..149cc019045 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmisynctaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlMISyncTaskProperties struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = 
&ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmitaskinput.go new file mode 100644 index 00000000000..6f679813c0b --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmitaskinput.go @@ -0,0 +1,14 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMITaskInput struct { + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..8af8a9623f4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmitaskoutput.go @@ -0,0 +1,15 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateMigrationInputSqlServerSqlMITaskOutput struct { + BackupFolderErrors *[]ReportableException `json:"backupFolderErrors,omitempty"` + BackupShareCredentialsErrors *[]ReportableException `json:"backupShareCredentialsErrors,omitempty"` + BackupStorageAccountErrors *[]ReportableException `json:"backupStorageAccountErrors,omitempty"` + DatabaseBackupInfo *DatabaseBackupInfo `json:"databaseBackupInfo,omitempty"` + ExistingBackupErrors *[]ReportableException `json:"existingBackupErrors,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + RestoreDatabaseNameErrors *[]ReportableException `json:"restoreDatabaseNameErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..99975352f0e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemigrationinputsqlserversqlmitaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +type ValidateMigrationInputSqlServerSqlMITaskProperties struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var 
decoded struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemongodbtaskproperties.go new file mode 100644 index 00000000000..bfde4cecba2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatemongodbtaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ValidateMongoDbTaskProperties{} + +type ValidateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMongoDbTaskProperties{} + +func (s ValidateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMongoDbTaskProperties{} + +func (s *ValidateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + ClientData *map[string]string 
`json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..a5ec6063a7c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package servicetaskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ValidateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.Oracle.AzureDbPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { 
+ var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..a3044eed366 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,8 @@ +package servicetaskresource + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateOracleAzureDbPostgreSqlSyncTaskOutput struct { + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatesyncmigrationinputsqlservertaskinput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatesyncmigrationinputsqlservertaskinput.go new file mode 100644 index 00000000000..0cb737b6402 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatesyncmigrationinputsqlservertaskinput.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateSyncMigrationInputSqlServerTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatesyncmigrationinputsqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatesyncmigrationinputsqlservertaskoutput.go new file mode 100644 index 00000000000..f2ebd2642dc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validatesyncmigrationinputsqlservertaskoutput.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateSyncMigrationInputSqlServerTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validationerror.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validationerror.go new file mode 100644 index 00000000000..7c14d611038 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_validationerror.go @@ -0,0 +1,9 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidationError struct { + Severity *Severity `json:"severity,omitempty"` + Text *string `json:"text,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/model_waitstatistics.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_waitstatistics.go new file mode 100644 index 00000000000..4beaf8108cb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/model_waitstatistics.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type WaitStatistics struct { + WaitCount *int64 `json:"waitCount,omitempty"` + WaitTimeMs *float64 `json:"waitTimeMs,omitempty"` + WaitType *string `json:"waitType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/servicetaskresource/version.go b/resource-manager/datamigration/2025-06-30/servicetaskresource/version.go new file mode 100644 index 00000000000..911c98738c3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/servicetaskresource/version.go @@ -0,0 +1,10 @@ +package servicetaskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/servicetaskresource/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/README.md b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/README.md new file mode 100644 index 00000000000..d3f49f84087 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/README.md @@ -0,0 +1,208 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/sqlmigrationservices` Documentation + +The `sqlmigrationservices` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/sqlmigrationservices" +``` + + +### Client Initialization + +```go +client := sqlmigrationservices.NewSqlMigrationServicesClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `SqlMigrationServicesClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := sqlmigrationservices.NewSqlMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlMigrationServiceName") + +payload := sqlmigrationservices.SqlMigrationService{ + // ... +} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `SqlMigrationServicesClient.Delete` + +```go +ctx := context.TODO() +id := sqlmigrationservices.NewSqlMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlMigrationServiceName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `SqlMigrationServicesClient.DeleteNode` + +```go +ctx := context.TODO() +id := sqlmigrationservices.NewSqlMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlMigrationServiceName") + +payload := sqlmigrationservices.DeleteNode{ + // ... 
+} + + +read, err := client.DeleteNode(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `SqlMigrationServicesClient.Get` + +```go +ctx := context.TODO() +id := sqlmigrationservices.NewSqlMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlMigrationServiceName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `SqlMigrationServicesClient.ListAuthKeys` + +```go +ctx := context.TODO() +id := sqlmigrationservices.NewSqlMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlMigrationServiceName") + +read, err := client.ListAuthKeys(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `SqlMigrationServicesClient.ListByResourceGroup` + +```go +ctx := context.TODO() +id := commonids.NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + +// alternatively `client.ListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `SqlMigrationServicesClient.ListBySubscription` + +```go +ctx := context.TODO() +id := commonids.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.ListBySubscription(ctx, id)` can be used to do batched pagination +items, err := client.ListBySubscriptionComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `SqlMigrationServicesClient.ListMigrations` + +```go 
+ctx := context.TODO() +id := sqlmigrationservices.NewSqlMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlMigrationServiceName") + +// alternatively `client.ListMigrations(ctx, id)` can be used to do batched pagination +items, err := client.ListMigrationsComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `SqlMigrationServicesClient.ListMonitoringData` + +```go +ctx := context.TODO() +id := sqlmigrationservices.NewSqlMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlMigrationServiceName") + +read, err := client.ListMonitoringData(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `SqlMigrationServicesClient.RegenerateAuthKeys` + +```go +ctx := context.TODO() +id := sqlmigrationservices.NewSqlMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlMigrationServiceName") + +payload := sqlmigrationservices.RegenAuthKeys{ + // ... +} + + +read, err := client.RegenerateAuthKeys(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `SqlMigrationServicesClient.Update` + +```go +ctx := context.TODO() +id := sqlmigrationservices.NewSqlMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlMigrationServiceName") + +payload := sqlmigrationservices.SqlMigrationServiceUpdate{ + // ... 
+} + + +if err := client.UpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/client.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/client.go new file mode 100644 index 00000000000..9f7392d4f6b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/client.go @@ -0,0 +1,26 @@ +package sqlmigrationservices + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlMigrationServicesClient struct { + Client *resourcemanager.Client +} + +func NewSqlMigrationServicesClientWithBaseURI(sdkApi sdkEnv.Api) (*SqlMigrationServicesClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "sqlmigrationservices", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating SqlMigrationServicesClient: %+v", err) + } + + return &SqlMigrationServicesClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/constants.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/constants.go new file mode 100644 index 00000000000..d627f5c4f53 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/constants.go @@ -0,0 +1,198 @@ +package sqlmigrationservices + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AuthType string + +const ( + AuthTypeAccountKey AuthType = "AccountKey" + AuthTypeManagedIdentity AuthType = "ManagedIdentity" +) + +func PossibleValuesForAuthType() []string { + return []string{ + string(AuthTypeAccountKey), + string(AuthTypeManagedIdentity), + } +} + +func (s *AuthType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthType(input string) (*AuthType, error) { + vals := map[string]AuthType{ + "accountkey": AuthTypeAccountKey, + "managedidentity": AuthTypeManagedIdentity, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthType(input) + return &out, nil +} + +type MongoMigrationStatus string + +const ( + MongoMigrationStatusCanceled MongoMigrationStatus = "Canceled" + MongoMigrationStatusCompleted MongoMigrationStatus = "Completed" + MongoMigrationStatusFailed MongoMigrationStatus = "Failed" + MongoMigrationStatusInProgress MongoMigrationStatus = "InProgress" + MongoMigrationStatusNotStarted MongoMigrationStatus = "NotStarted" +) + +func PossibleValuesForMongoMigrationStatus() []string { + return []string{ + string(MongoMigrationStatusCanceled), + string(MongoMigrationStatusCompleted), + string(MongoMigrationStatusFailed), + string(MongoMigrationStatusInProgress), + string(MongoMigrationStatusNotStarted), + } +} + +func (s *MongoMigrationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoMigrationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func 
parseMongoMigrationStatus(input string) (*MongoMigrationStatus, error) { + vals := map[string]MongoMigrationStatus{ + "canceled": MongoMigrationStatusCanceled, + "completed": MongoMigrationStatusCompleted, + "failed": MongoMigrationStatusFailed, + "inprogress": MongoMigrationStatusInProgress, + "notstarted": MongoMigrationStatusNotStarted, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoMigrationStatus(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateProvisioning ProvisioningState = "Provisioning" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateCanceled), + string(ProvisioningStateFailed), + string(ProvisioningStateProvisioning), + string(ProvisioningStateSucceeded), + string(ProvisioningStateUpdating), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "canceled": ProvisioningStateCanceled, + "failed": ProvisioningStateFailed, + "provisioning": ProvisioningStateProvisioning, + "succeeded": ProvisioningStateSucceeded, + "updating": ProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := 
ProvisioningState(input) + return &out, nil +} + +type ResourceType string + +const ( + ResourceTypeMongoToCosmosDbMongo ResourceType = "MongoToCosmosDbMongo" + ResourceTypeSqlDb ResourceType = "SqlDb" + ResourceTypeSqlMi ResourceType = "SqlMi" + ResourceTypeSqlVM ResourceType = "SqlVm" +) + +func PossibleValuesForResourceType() []string { + return []string{ + string(ResourceTypeMongoToCosmosDbMongo), + string(ResourceTypeSqlDb), + string(ResourceTypeSqlMi), + string(ResourceTypeSqlVM), + } +} + +func (s *ResourceType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResourceType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResourceType(input string) (*ResourceType, error) { + vals := map[string]ResourceType{ + "mongotocosmosdbmongo": ResourceTypeMongoToCosmosDbMongo, + "sqldb": ResourceTypeSqlDb, + "sqlmi": ResourceTypeSqlMi, + "sqlvm": ResourceTypeSqlVM, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResourceType(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/id_sqlmigrationservice.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/id_sqlmigrationservice.go new file mode 100644 index 00000000000..d4c44ed0483 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/id_sqlmigrationservice.go @@ -0,0 +1,130 @@ +package sqlmigrationservices + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&SqlMigrationServiceId{}) +} + +var _ resourceids.ResourceId = &SqlMigrationServiceId{} + +// SqlMigrationServiceId is a struct representing the Resource ID for a Sql Migration Service +type SqlMigrationServiceId struct { + SubscriptionId string + ResourceGroupName string + SqlMigrationServiceName string +} + +// NewSqlMigrationServiceID returns a new SqlMigrationServiceId struct +func NewSqlMigrationServiceID(subscriptionId string, resourceGroupName string, sqlMigrationServiceName string) SqlMigrationServiceId { + return SqlMigrationServiceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + SqlMigrationServiceName: sqlMigrationServiceName, + } +} + +// ParseSqlMigrationServiceID parses 'input' into a SqlMigrationServiceId +func ParseSqlMigrationServiceID(input string) (*SqlMigrationServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&SqlMigrationServiceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SqlMigrationServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseSqlMigrationServiceIDInsensitively parses 'input' case-insensitively into a SqlMigrationServiceId +// note: this method should only be used for API response data and not user input +func ParseSqlMigrationServiceIDInsensitively(input string) (*SqlMigrationServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&SqlMigrationServiceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SqlMigrationServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *SqlMigrationServiceId) FromParseResult(input resourceids.ParseResult) error { + var ok 
bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.SqlMigrationServiceName, ok = input.Parsed["sqlMigrationServiceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "sqlMigrationServiceName", input) + } + + return nil +} + +// ValidateSqlMigrationServiceID checks that 'input' can be parsed as a Sql Migration Service ID +func ValidateSqlMigrationServiceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseSqlMigrationServiceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Sql Migration Service ID +func (id SqlMigrationServiceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/sqlMigrationServices/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.SqlMigrationServiceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Sql Migration Service ID +func (id SqlMigrationServiceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", 
"Microsoft.DataMigration"), + resourceids.StaticSegment("staticSqlMigrationServices", "sqlMigrationServices", "sqlMigrationServices"), + resourceids.UserSpecifiedSegment("sqlMigrationServiceName", "sqlMigrationServiceName"), + } +} + +// String returns a human-readable description of this Sql Migration Service ID +func (id SqlMigrationServiceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Sql Migration Service Name: %q", id.SqlMigrationServiceName), + } + return fmt.Sprintf("Sql Migration Service (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/id_sqlmigrationservice_test.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/id_sqlmigrationservice_test.go new file mode 100644 index 00000000000..5717f4df9d0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/id_sqlmigrationservice_test.go @@ -0,0 +1,282 @@ +package sqlmigrationservices + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &SqlMigrationServiceId{} + +func TestNewSqlMigrationServiceID(t *testing.T) { + id := NewSqlMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlMigrationServiceName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.SqlMigrationServiceName != "sqlMigrationServiceName" { + t.Fatalf("Expected %q but got %q for Segment 'SqlMigrationServiceName'", id.SqlMigrationServiceName, "sqlMigrationServiceName") + } +} + +func TestFormatSqlMigrationServiceID(t *testing.T) { + actual := NewSqlMigrationServiceID("12345678-1234-9876-4563-123456789012", "example-resource-group", "sqlMigrationServiceName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/sqlMigrationServices/sqlMigrationServiceName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseSqlMigrationServiceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SqlMigrationServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/sqlMigrationServices", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/sqlMigrationServices/sqlMigrationServiceName", + Expected: &SqlMigrationServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + SqlMigrationServiceName: "sqlMigrationServiceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/sqlMigrationServices/sqlMigrationServiceName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSqlMigrationServiceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.SqlMigrationServiceName != v.Expected.SqlMigrationServiceName { + t.Fatalf("Expected %q but got %q for SqlMigrationServiceName", 
v.Expected.SqlMigrationServiceName, actual.SqlMigrationServiceName) + } + + } +} + +func TestParseSqlMigrationServiceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SqlMigrationServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/sqlMigrationServices", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sQlMiGrAtIoNsErViCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/sqlMigrationServices/sqlMigrationServiceName", + Expected: &SqlMigrationServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + SqlMigrationServiceName: "sqlMigrationServiceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.DataMigration/sqlMigrationServices/sqlMigrationServiceName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sQlMiGrAtIoNsErViCeS/sQlMiGrAtIoNsErViCeNaMe", + Expected: &SqlMigrationServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + SqlMigrationServiceName: "sQlMiGrAtIoNsErViCeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sQlMiGrAtIoNsErViCeS/sQlMiGrAtIoNsErViCeNaMe/extra", + Error: true, + }, + } + for _, 
v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSqlMigrationServiceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.SqlMigrationServiceName != v.Expected.SqlMigrationServiceName { + t.Fatalf("Expected %q but got %q for SqlMigrationServiceName", v.Expected.SqlMigrationServiceName, actual.SqlMigrationServiceName) + } + + } +} + +func TestSegmentsForSqlMigrationServiceId(t *testing.T) { + segments := SqlMigrationServiceId{}.Segments() + if len(segments) == 0 { + t.Fatalf("SqlMigrationServiceId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_createorupdate.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_createorupdate.go new file mode 100644 index 00000000000..3b92b62111a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_createorupdate.go @@ -0,0 +1,75 @@ +package sqlmigrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *SqlMigrationService +} + +// CreateOrUpdate ... +func (c SqlMigrationServicesClient) CreateOrUpdate(ctx context.Context, id SqlMigrationServiceId, input SqlMigrationService) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c SqlMigrationServicesClient) CreateOrUpdateThenPoll(ctx context.Context, id SqlMigrationServiceId, input SqlMigrationService) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_delete.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_delete.go new file mode 100644 index 
00000000000..ff707595b23 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_delete.go @@ -0,0 +1,71 @@ +package sqlmigrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c SqlMigrationServicesClient) Delete(ctx context.Context, id SqlMigrationServiceId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c SqlMigrationServicesClient) DeleteThenPoll(ctx context.Context, id SqlMigrationServiceId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_deletenode.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_deletenode.go new file mode 100644 index 00000000000..5e92920a349 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_deletenode.go @@ -0,0 +1,58 @@ +package sqlmigrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteNodeOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DeleteNode +} + +// DeleteNode ... +func (c SqlMigrationServicesClient) DeleteNode(ctx context.Context, id SqlMigrationServiceId, input DeleteNode) (result DeleteNodeOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/deleteNode", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DeleteNode + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_get.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_get.go new file mode 100644 index 00000000000..6f8e694190c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_get.go @@ -0,0 +1,53 @@ +package 
sqlmigrationservices + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *SqlMigrationService +} + +// Get ... +func (c SqlMigrationServicesClient) Get(ctx context.Context, id SqlMigrationServiceId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model SqlMigrationService + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listauthkeys.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listauthkeys.go new file mode 100644 index 00000000000..e77ed6bc5c1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listauthkeys.go @@ -0,0 +1,54 @@ +package sqlmigrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListAuthKeysOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *AuthenticationKeys +} + +// ListAuthKeys ... +func (c SqlMigrationServicesClient) ListAuthKeys(ctx context.Context, id SqlMigrationServiceId) (result ListAuthKeysOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/listAuthKeys", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model AuthenticationKeys + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listbyresourcegroup.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listbyresourcegroup.go new file mode 100644 index 00000000000..ca7499e0bbb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listbyresourcegroup.go @@ -0,0 +1,106 @@ +package sqlmigrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]SqlMigrationService +} + +type ListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []SqlMigrationService +} + +type ListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByResourceGroup ... +func (c SqlMigrationServicesClient) ListByResourceGroup(ctx context.Context, id commonids.ResourceGroupId) (result ListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/sqlMigrationServices", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]SqlMigrationService `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByResourceGroupComplete retrieves all the results into a single object +func (c SqlMigrationServicesClient) ListByResourceGroupComplete(ctx context.Context, id commonids.ResourceGroupId) (ListByResourceGroupCompleteResult, error) { + return c.ListByResourceGroupCompleteMatchingPredicate(ctx, id, SqlMigrationServiceOperationPredicate{}) +} + +// ListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c SqlMigrationServicesClient) ListByResourceGroupCompleteMatchingPredicate(ctx 
context.Context, id commonids.ResourceGroupId, predicate SqlMigrationServiceOperationPredicate) (result ListByResourceGroupCompleteResult, err error) { + items := make([]SqlMigrationService, 0) + + resp, err := c.ListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByResourceGroupCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listbysubscription.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listbysubscription.go new file mode 100644 index 00000000000..516e1e0effd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listbysubscription.go @@ -0,0 +1,106 @@ +package sqlmigrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListBySubscriptionOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]SqlMigrationService +} + +type ListBySubscriptionCompleteResult struct { + LatestHttpResponse *http.Response + Items []SqlMigrationService +} + +type ListBySubscriptionCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListBySubscriptionCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListBySubscription ... 
+func (c SqlMigrationServicesClient) ListBySubscription(ctx context.Context, id commonids.SubscriptionId) (result ListBySubscriptionOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListBySubscriptionCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/sqlMigrationServices", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]SqlMigrationService `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListBySubscriptionComplete retrieves all the results into a single object +func (c SqlMigrationServicesClient) ListBySubscriptionComplete(ctx context.Context, id commonids.SubscriptionId) (ListBySubscriptionCompleteResult, error) { + return c.ListBySubscriptionCompleteMatchingPredicate(ctx, id, SqlMigrationServiceOperationPredicate{}) +} + +// ListBySubscriptionCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c SqlMigrationServicesClient) ListBySubscriptionCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate SqlMigrationServiceOperationPredicate) (result ListBySubscriptionCompleteResult, err error) { + items := make([]SqlMigrationService, 0) + + resp, err := c.ListBySubscription(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListBySubscriptionCompleteResult{ + 
LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listmigrations.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listmigrations.go new file mode 100644 index 00000000000..0d2022d35f5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listmigrations.go @@ -0,0 +1,105 @@ +package sqlmigrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListMigrationsOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]DatabaseMigration +} + +type ListMigrationsCompleteResult struct { + LatestHttpResponse *http.Response + Items []DatabaseMigration +} + +type ListMigrationsCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListMigrationsCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListMigrations ... 
+func (c SqlMigrationServicesClient) ListMigrations(ctx context.Context, id SqlMigrationServiceId) (result ListMigrationsOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListMigrationsCustomPager{}, + Path: fmt.Sprintf("%s/listMigrations", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]DatabaseMigration `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListMigrationsComplete retrieves all the results into a single object +func (c SqlMigrationServicesClient) ListMigrationsComplete(ctx context.Context, id SqlMigrationServiceId) (ListMigrationsCompleteResult, error) { + return c.ListMigrationsCompleteMatchingPredicate(ctx, id, DatabaseMigrationOperationPredicate{}) +} + +// ListMigrationsCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c SqlMigrationServicesClient) ListMigrationsCompleteMatchingPredicate(ctx context.Context, id SqlMigrationServiceId, predicate DatabaseMigrationOperationPredicate) (result ListMigrationsCompleteResult, err error) { + items := make([]DatabaseMigration, 0) + + resp, err := c.ListMigrations(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListMigrationsCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git 
a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listmonitoringdata.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listmonitoringdata.go new file mode 100644 index 00000000000..b630b45357f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_listmonitoringdata.go @@ -0,0 +1,54 @@ +package sqlmigrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListMonitoringDataOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *IntegrationRuntimeMonitoringData +} + +// ListMonitoringData ... +func (c SqlMigrationServicesClient) ListMonitoringData(ctx context.Context, id SqlMigrationServiceId) (result ListMonitoringDataOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/listMonitoringData", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model IntegrationRuntimeMonitoringData + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_regenerateauthkeys.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_regenerateauthkeys.go new file mode 100644 index 00000000000..d8f8895986d --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_regenerateauthkeys.go @@ -0,0 +1,58 @@ +package sqlmigrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RegenerateAuthKeysOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *RegenAuthKeys +} + +// RegenerateAuthKeys ... +func (c SqlMigrationServicesClient) RegenerateAuthKeys(ctx context.Context, id SqlMigrationServiceId, input RegenAuthKeys) (result RegenerateAuthKeysOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/regenerateAuthKeys", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model RegenAuthKeys + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_update.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_update.go new file mode 100644 index 00000000000..ed94cf843df --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/method_update.go @@ -0,0 +1,75 @@ +package sqlmigrationservices + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *SqlMigrationService +} + +// Update ... +func (c SqlMigrationServicesClient) Update(ctx context.Context, id SqlMigrationServiceId, input SqlMigrationServiceUpdate) (result UpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UpdateThenPoll performs Update then polls until it's completed +func (c SqlMigrationServicesClient) UpdateThenPoll(ctx context.Context, id SqlMigrationServiceId, input SqlMigrationServiceUpdate) error { + result, err := c.Update(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Update: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Update: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_authenticationkeys.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_authenticationkeys.go new file mode 100644 index 00000000000..cc5362a5ceb --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_authenticationkeys.go @@ -0,0 +1,9 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AuthenticationKeys struct { + AuthKey1 *string `json:"authKey1,omitempty"` + AuthKey2 *string `json:"authKey2,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_azureblob.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_azureblob.go new file mode 100644 index 00000000000..76db7b8c884 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_azureblob.go @@ -0,0 +1,16 @@ +package sqlmigrationservices + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AzureBlob struct { + AccountKey *string `json:"accountKey,omitempty"` + AuthType *AuthType `json:"authType,omitempty"` + BlobContainerName *string `json:"blobContainerName,omitempty"` + Identity *identity.LegacySystemAndUserAssignedMap `json:"identity,omitempty"` + StorageAccountResourceId *string `json:"storageAccountResourceId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_backupconfiguration.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_backupconfiguration.go new file mode 100644 index 00000000000..a447dfabeeb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_backupconfiguration.go @@ -0,0 +1,9 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BackupConfiguration struct { + SourceLocation *SourceLocation `json:"sourceLocation,omitempty"` + TargetLocation *TargetLocation `json:"targetLocation,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_copyprogressdetails.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_copyprogressdetails.go new file mode 100644 index 00000000000..ea1862dec16 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_copyprogressdetails.go @@ -0,0 +1,36 @@ +package sqlmigrationservices + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CopyProgressDetails struct { + CopyDuration *int64 `json:"copyDuration,omitempty"` + CopyStart *string `json:"copyStart,omitempty"` + CopyThroughput *float64 `json:"copyThroughput,omitempty"` + DataRead *int64 `json:"dataRead,omitempty"` + DataWritten *int64 `json:"dataWritten,omitempty"` + ParallelCopyType *string `json:"parallelCopyType,omitempty"` + RowsCopied *int64 `json:"rowsCopied,omitempty"` + RowsRead *int64 `json:"rowsRead,omitempty"` + Status *string `json:"status,omitempty"` + TableName *string `json:"tableName,omitempty"` + UsedParallelCopies *int64 `json:"usedParallelCopies,omitempty"` +} + +func (o *CopyProgressDetails) GetCopyStartAsTime() (*time.Time, error) { + if o.CopyStart == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CopyStart, "2006-01-02T15:04:05Z07:00") +} + +func (o *CopyProgressDetails) SetCopyStartAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CopyStart = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigration.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigration.go new 
file mode 100644 index 00000000000..d58674632e0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigration.go @@ -0,0 +1,16 @@ +package sqlmigrationservices + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseMigration struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DatabaseMigrationProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationbaseproperties.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationbaseproperties.go new file mode 100644 index 00000000000..8b5cd6e4b87 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationbaseproperties.go @@ -0,0 +1,92 @@ +package sqlmigrationservices + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseMigrationBaseProperties interface { + DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl +} + +var _ DatabaseMigrationBaseProperties = BaseDatabaseMigrationBasePropertiesImpl{} + +type BaseDatabaseMigrationBasePropertiesImpl struct { + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s BaseDatabaseMigrationBasePropertiesImpl) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return s +} + +var _ DatabaseMigrationBaseProperties = RawDatabaseMigrationBasePropertiesImpl{} + +// RawDatabaseMigrationBasePropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawDatabaseMigrationBasePropertiesImpl struct { + databaseMigrationBaseProperties BaseDatabaseMigrationBasePropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawDatabaseMigrationBasePropertiesImpl) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return s.databaseMigrationBaseProperties +} + +func UnmarshalDatabaseMigrationBasePropertiesImplementation(input []byte) (DatabaseMigrationBaseProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationBaseProperties into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["kind"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseMigrationProperties") { + var out DatabaseMigrationProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into DatabaseMigrationProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MongoToCosmosDbMongo") { + var out DatabaseMigrationPropertiesCosmosDbMongo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into DatabaseMigrationPropertiesCosmosDbMongo: %+v", err) + } + return out, nil + } + + var parent BaseDatabaseMigrationBasePropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseDatabaseMigrationBasePropertiesImpl: %+v", err) + } + + return RawDatabaseMigrationBasePropertiesImpl{ + databaseMigrationBaseProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationproperties.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationproperties.go new file mode 100644 index 00000000000..ef0c96cedc8 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationproperties.go @@ -0,0 +1,98 @@ +package sqlmigrationservices + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ DatabaseMigrationBaseProperties = DatabaseMigrationProperties{} + +type DatabaseMigrationProperties struct { + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + SourceServerName *string `json:"sourceServerName,omitempty"` + SourceSqlConnection *SqlConnectionInformation `json:"sourceSqlConnection,omitempty"` + TargetDatabaseCollation *string `json:"targetDatabaseCollation,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationProperties) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +func (o 
*DatabaseMigrationProperties) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseMigrationProperties) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DatabaseMigrationProperties) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseMigrationProperties) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} + +var _ json.Marshaler = DatabaseMigrationProperties{} + +func (s DatabaseMigrationProperties) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationProperties: %+v", err) + } + + decoded["kind"] = "DatabaseMigrationProperties" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationpropertiescosmosdbmongo.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationpropertiescosmosdbmongo.go new file mode 100644 index 00000000000..87fdc8bfe5f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationpropertiescosmosdbmongo.go @@ -0,0 +1,97 @@ +package sqlmigrationservices + +import ( + "encoding/json" + "fmt" + "time" + + 
"github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesCosmosDbMongo{} + +type DatabaseMigrationPropertiesCosmosDbMongo struct { + CollectionList *[]MongoMigrationCollection `json:"collectionList,omitempty"` + SourceMongoConnection *MongoConnectionInformation `json:"sourceMongoConnection,omitempty"` + TargetMongoConnection *MongoConnectionInformation `json:"targetMongoConnection,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationPropertiesCosmosDbMongo) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +func (o *DatabaseMigrationPropertiesCosmosDbMongo) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseMigrationPropertiesCosmosDbMongo) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DatabaseMigrationPropertiesCosmosDbMongo) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseMigrationPropertiesCosmosDbMongo) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} + +var _ json.Marshaler = DatabaseMigrationPropertiesCosmosDbMongo{} + +func (s DatabaseMigrationPropertiesCosmosDbMongo) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesCosmosDbMongo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationPropertiesCosmosDbMongo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesCosmosDbMongo: %+v", err) + } + + decoded["kind"] = "MongoToCosmosDbMongo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesCosmosDbMongo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationpropertiessqldb.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationpropertiessqldb.go new file mode 100644 index 00000000000..5e38e05c036 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationpropertiessqldb.go @@ -0,0 +1,71 @@ +package sqlmigrationservices + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesSqlDb{} + +type DatabaseMigrationPropertiesSqlDb struct { + MigrationStatusDetails *SqlDbMigrationStatusDetails `json:"migrationStatusDetails,omitempty"` + OfflineConfiguration *SqlDbOfflineConfiguration `json:"offlineConfiguration,omitempty"` + TableList *[]string `json:"tableList,omitempty"` + TargetSqlConnection *SqlConnectionInformation `json:"targetSqlConnection,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationPropertiesSqlDb) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +var _ json.Marshaler = DatabaseMigrationPropertiesSqlDb{} + +func (s DatabaseMigrationPropertiesSqlDb) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesSqlDb + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling DatabaseMigrationPropertiesSqlDb: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesSqlDb: %+v", err) + } + + decoded["kind"] = "SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesSqlDb: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationpropertiessqlmi.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationpropertiessqlmi.go new file mode 100644 index 00000000000..40d06f4671b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationpropertiessqlmi.go @@ -0,0 +1,70 @@ +package sqlmigrationservices + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesSqlMi{} + +type DatabaseMigrationPropertiesSqlMi struct { + BackupConfiguration *BackupConfiguration `json:"backupConfiguration,omitempty"` + MigrationStatusDetails *MigrationStatusDetails `json:"migrationStatusDetails,omitempty"` + OfflineConfiguration *OfflineConfiguration `json:"offlineConfiguration,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s DatabaseMigrationPropertiesSqlMi) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +var _ json.Marshaler = DatabaseMigrationPropertiesSqlMi{} + +func (s DatabaseMigrationPropertiesSqlMi) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesSqlMi + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationPropertiesSqlMi: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != 
nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesSqlMi: %+v", err) + } + + decoded["kind"] = "SqlMi" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesSqlMi: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationpropertiessqlvm.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationpropertiessqlvm.go new file mode 100644 index 00000000000..3e496e80f96 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_databasemigrationpropertiessqlvm.go @@ -0,0 +1,70 @@ +package sqlmigrationservices + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ DatabaseMigrationBaseProperties = DatabaseMigrationPropertiesSqlVM{} + +type DatabaseMigrationPropertiesSqlVM struct { + BackupConfiguration *BackupConfiguration `json:"backupConfiguration,omitempty"` + MigrationStatusDetails *MigrationStatusDetails `json:"migrationStatusDetails,omitempty"` + OfflineConfiguration *OfflineConfiguration `json:"offlineConfiguration,omitempty"` + + // Fields inherited from DatabaseMigrationBaseProperties + + EndedOn *string `json:"endedOn,omitempty"` + Kind ResourceType `json:"kind"` + MigrationFailureError *ErrorInfo `json:"migrationFailureError,omitempty"` + MigrationOperationId *string `json:"migrationOperationId,omitempty"` + MigrationService *string `json:"migrationService,omitempty"` + MigrationStatus *string `json:"migrationStatus,omitempty"` + ProvisioningError *string `json:"provisioningError,omitempty"` + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + Scope *string `json:"scope,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` +} + +func (s 
DatabaseMigrationPropertiesSqlVM) DatabaseMigrationBaseProperties() BaseDatabaseMigrationBasePropertiesImpl { + return BaseDatabaseMigrationBasePropertiesImpl{ + EndedOn: s.EndedOn, + Kind: s.Kind, + MigrationFailureError: s.MigrationFailureError, + MigrationOperationId: s.MigrationOperationId, + MigrationService: s.MigrationService, + MigrationStatus: s.MigrationStatus, + ProvisioningError: s.ProvisioningError, + ProvisioningState: s.ProvisioningState, + Scope: s.Scope, + StartedOn: s.StartedOn, + } +} + +var _ json.Marshaler = DatabaseMigrationPropertiesSqlVM{} + +func (s DatabaseMigrationPropertiesSqlVM) MarshalJSON() ([]byte, error) { + type wrapper DatabaseMigrationPropertiesSqlVM + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling DatabaseMigrationPropertiesSqlVM: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling DatabaseMigrationPropertiesSqlVM: %+v", err) + } + + decoded["kind"] = "SqlVm" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling DatabaseMigrationPropertiesSqlVM: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_deletenode.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_deletenode.go new file mode 100644 index 00000000000..4179da2fe56 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_deletenode.go @@ -0,0 +1,9 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DeleteNode struct { + IntegrationRuntimeName *string `json:"integrationRuntimeName,omitempty"` + NodeName *string `json:"nodeName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_errorinfo.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_errorinfo.go new file mode 100644 index 00000000000..baa1d8ce6c0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_errorinfo.go @@ -0,0 +1,9 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ErrorInfo struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_integrationruntimemonitoringdata.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_integrationruntimemonitoringdata.go new file mode 100644 index 00000000000..9c5470f324c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_integrationruntimemonitoringdata.go @@ -0,0 +1,9 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type IntegrationRuntimeMonitoringData struct { + Name *string `json:"name,omitempty"` + Nodes *[]NodeMonitoringData `json:"nodes,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_migrationstatusdetails.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_migrationstatusdetails.go new file mode 100644 index 00000000000..23889642899 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_migrationstatusdetails.go @@ -0,0 +1,20 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationStatusDetails struct { + ActiveBackupSets *[]SqlBackupSetInfo `json:"activeBackupSets,omitempty"` + BlobContainerName *string `json:"blobContainerName,omitempty"` + CompleteRestoreErrorMessage *string `json:"completeRestoreErrorMessage,omitempty"` + CurrentRestoringFilename *string `json:"currentRestoringFilename,omitempty"` + FileUploadBlockingErrors *[]string `json:"fileUploadBlockingErrors,omitempty"` + FullBackupSetInfo *SqlBackupSetInfo `json:"fullBackupSetInfo,omitempty"` + InvalidFiles *[]string `json:"invalidFiles,omitempty"` + IsFullBackupRestored *bool `json:"isFullBackupRestored,omitempty"` + LastRestoredBackupSetInfo *SqlBackupSetInfo `json:"lastRestoredBackupSetInfo,omitempty"` + LastRestoredFilename *string `json:"lastRestoredFilename,omitempty"` + MigrationState *string `json:"migrationState,omitempty"` + PendingLogBackupsCount *int64 `json:"pendingLogBackupsCount,omitempty"` + RestoreBlockingReason *string `json:"restoreBlockingReason,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_mongoconnectioninformation.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_mongoconnectioninformation.go new file mode 100644 index 00000000000..c50ab7c39d6 --- 
/dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_mongoconnectioninformation.go @@ -0,0 +1,13 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoConnectionInformation struct { + ConnectionString *string `json:"connectionString,omitempty"` + Host *string `json:"host,omitempty"` + Password *string `json:"password,omitempty"` + Port *int64 `json:"port,omitempty"` + UseSsl *bool `json:"useSsl,omitempty"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_mongomigrationcollection.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_mongomigrationcollection.go new file mode 100644 index 00000000000..214948ace5c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_mongomigrationcollection.go @@ -0,0 +1,12 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoMigrationCollection struct { + MigrationProgressDetails *MongoMigrationProgressDetails `json:"migrationProgressDetails,omitempty"` + SourceCollection *string `json:"sourceCollection,omitempty"` + SourceDatabase *string `json:"sourceDatabase,omitempty"` + TargetCollection *string `json:"targetCollection,omitempty"` + TargetDatabase *string `json:"targetDatabase,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_mongomigrationprogressdetails.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_mongomigrationprogressdetails.go new file mode 100644 index 00000000000..299c12ea243 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_mongomigrationprogressdetails.go @@ -0,0 +1,12 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoMigrationProgressDetails struct { + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + MigrationError *string `json:"migrationError,omitempty"` + MigrationStatus *MongoMigrationStatus `json:"migrationStatus,omitempty"` + ProcessedDocumentCount *int64 `json:"processedDocumentCount,omitempty"` + SourceDocumentCount *int64 `json:"sourceDocumentCount,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_nodemonitoringdata.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_nodemonitoringdata.go new file mode 100644 index 00000000000..696e95b66c7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_nodemonitoringdata.go @@ -0,0 +1,16 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type NodeMonitoringData struct { + AdditionalProperties *map[string]interface{} `json:"additionalProperties,omitempty"` + AvailableMemoryInMB *int64 `json:"availableMemoryInMB,omitempty"` + ConcurrentJobsLimit *int64 `json:"concurrentJobsLimit,omitempty"` + ConcurrentJobsRunning *int64 `json:"concurrentJobsRunning,omitempty"` + CpuUtilization *int64 `json:"cpuUtilization,omitempty"` + MaxConcurrentJobs *int64 `json:"maxConcurrentJobs,omitempty"` + NodeName *string `json:"nodeName,omitempty"` + ReceivedBytes *float64 `json:"receivedBytes,omitempty"` + SentBytes *float64 `json:"sentBytes,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_offlineconfiguration.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_offlineconfiguration.go new file mode 100644 index 00000000000..a42f7bc806f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_offlineconfiguration.go @@ -0,0 +1,9 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type OfflineConfiguration struct { + LastBackupName *string `json:"lastBackupName,omitempty"` + Offline *bool `json:"offline,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_regenauthkeys.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_regenauthkeys.go new file mode 100644 index 00000000000..a705e26f61f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_regenauthkeys.go @@ -0,0 +1,10 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type RegenAuthKeys struct { + AuthKey1 *string `json:"authKey1,omitempty"` + AuthKey2 *string `json:"authKey2,omitempty"` + KeyName *string `json:"keyName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sourcelocation.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sourcelocation.go new file mode 100644 index 00000000000..aee0bf0330d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sourcelocation.go @@ -0,0 +1,10 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SourceLocation struct { + AzureBlob *AzureBlob `json:"azureBlob,omitempty"` + FileShare *SqlFileShare `json:"fileShare,omitempty"` + FileStorageType *string `json:"fileStorageType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlbackupfileinfo.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlbackupfileinfo.go new file mode 100644 index 00000000000..91d225de9bb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlbackupfileinfo.go @@ -0,0 +1,15 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlBackupFileInfo struct { + CopyDuration *int64 `json:"copyDuration,omitempty"` + CopyThroughput *float64 `json:"copyThroughput,omitempty"` + DataRead *int64 `json:"dataRead,omitempty"` + DataWritten *int64 `json:"dataWritten,omitempty"` + FamilySequenceNumber *int64 `json:"familySequenceNumber,omitempty"` + FileName *string `json:"fileName,omitempty"` + Status *string `json:"status,omitempty"` + TotalSize *int64 `json:"totalSize,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlbackupsetinfo.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlbackupsetinfo.go new file mode 100644 index 00000000000..a50c4ce6d1b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlbackupsetinfo.go @@ -0,0 +1,48 @@ +package sqlmigrationservices + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlBackupSetInfo struct { + BackupFinishDate *string `json:"backupFinishDate,omitempty"` + BackupSetId *string `json:"backupSetId,omitempty"` + BackupStartDate *string `json:"backupStartDate,omitempty"` + BackupType *string `json:"backupType,omitempty"` + FamilyCount *int64 `json:"familyCount,omitempty"` + FirstLSN *string `json:"firstLSN,omitempty"` + HasBackupChecksums *bool `json:"hasBackupChecksums,omitempty"` + IgnoreReasons *[]string `json:"ignoreReasons,omitempty"` + IsBackupRestored *bool `json:"isBackupRestored,omitempty"` + LastLSN *string `json:"lastLSN,omitempty"` + ListOfBackupFiles *[]SqlBackupFileInfo `json:"listOfBackupFiles,omitempty"` +} + +func (o *SqlBackupSetInfo) GetBackupFinishDateAsTime() (*time.Time, error) { + if o.BackupFinishDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *SqlBackupSetInfo) SetBackupFinishDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishDate = &formatted +} + +func (o *SqlBackupSetInfo) GetBackupStartDateAsTime() (*time.Time, error) { + if o.BackupStartDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupStartDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *SqlBackupSetInfo) SetBackupStartDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupStartDate = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlconnectioninformation.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlconnectioninformation.go new file mode 100644 index 00000000000..217f58dfa32 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlconnectioninformation.go @@ -0,0 +1,13 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type SqlConnectionInformation struct { + Authentication *string `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Password *string `json:"password,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqldbmigrationstatusdetails.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqldbmigrationstatusdetails.go new file mode 100644 index 00000000000..c60bb2ee5e4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqldbmigrationstatusdetails.go @@ -0,0 +1,10 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlDbMigrationStatusDetails struct { + ListOfCopyProgressDetails *[]CopyProgressDetails `json:"listOfCopyProgressDetails,omitempty"` + MigrationState *string `json:"migrationState,omitempty"` + SqlDataCopyErrors *[]string `json:"sqlDataCopyErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqldbofflineconfiguration.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqldbofflineconfiguration.go new file mode 100644 index 00000000000..db5501ac403 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqldbofflineconfiguration.go @@ -0,0 +1,8 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlDbOfflineConfiguration struct { + Offline *bool `json:"offline,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlfileshare.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlfileshare.go new file mode 100644 index 00000000000..385ff24d499 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlfileshare.go @@ -0,0 +1,10 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlFileShare struct { + Password *string `json:"password,omitempty"` + Path *string `json:"path,omitempty"` + Username *string `json:"username,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlmigrationservice.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlmigrationservice.go new file mode 100644 index 00000000000..4094b8e66bc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlmigrationservice.go @@ -0,0 +1,18 @@ +package sqlmigrationservices + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlMigrationService struct { + Id *string `json:"id,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *SqlMigrationServiceProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlmigrationserviceproperties.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlmigrationserviceproperties.go new file mode 100644 index 00000000000..a347e639029 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlmigrationserviceproperties.go @@ -0,0 +1,9 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlMigrationServiceProperties struct { + IntegrationRuntimeState *string `json:"integrationRuntimeState,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlmigrationserviceupdate.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlmigrationserviceupdate.go new file mode 100644 index 00000000000..e6526205b3b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_sqlmigrationserviceupdate.go @@ -0,0 +1,8 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlMigrationServiceUpdate struct { + Tags *map[string]string `json:"tags,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_targetlocation.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_targetlocation.go new file mode 100644 index 00000000000..e780d4b701e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/model_targetlocation.go @@ -0,0 +1,9 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TargetLocation struct { + AccountKey *string `json:"accountKey,omitempty"` + StorageAccountResourceId *string `json:"storageAccountResourceId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/predicates.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/predicates.go new file mode 100644 index 00000000000..52b49aaab83 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/predicates.go @@ -0,0 +1,55 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseMigrationOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p DatabaseMigrationOperationPredicate) Matches(input DatabaseMigration) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type SqlMigrationServiceOperationPredicate struct { + Id *string + Location *string + Name *string + Type *string +} + +func (p SqlMigrationServiceOperationPredicate) Matches(input SqlMigrationService) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Location != nil && (input.Location == nil || *p.Location != *input.Location) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/datamigration/2025-06-30/sqlmigrationservices/version.go b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/version.go new file mode 100644 index 00000000000..1559894392c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/sqlmigrationservices/version.go @@ -0,0 +1,10 @@ +package sqlmigrationservices + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/sqlmigrationservices/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/README.md b/resource-manager/datamigration/2025-06-30/standardoperation/README.md new file mode 100644 index 00000000000..8e16a21103d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/README.md @@ -0,0 +1,585 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/standardoperation` Documentation + +The `standardoperation` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/standardoperation" +``` + + +### Client Initialization + +```go +client := standardoperation.NewStandardOperationClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `StandardOperationClient.FilesCreateOrUpdate` + +```go +ctx := context.TODO() +id := standardoperation.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +payload := standardoperation.ProjectFile{ + // ... 
+} + + +read, err := client.FilesCreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.FilesDelete` + +```go +ctx := context.TODO() +id := standardoperation.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +read, err := client.FilesDelete(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.FilesGet` + +```go +ctx := context.TODO() +id := standardoperation.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +read, err := client.FilesGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.FilesList` + +```go +ctx := context.TODO() +id := standardoperation.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +// alternatively `client.FilesList(ctx, id)` can be used to do batched pagination +items, err := client.FilesListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `StandardOperationClient.FilesRead` + +```go +ctx := context.TODO() +id := standardoperation.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +read, err := client.FilesRead(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.FilesReadWrite` + +```go +ctx := context.TODO() +id := 
standardoperation.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +read, err := client.FilesReadWrite(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.FilesUpdate` + +```go +ctx := context.TODO() +id := standardoperation.NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + +payload := standardoperation.ProjectFile{ + // ... +} + + +read, err := client.FilesUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.ProjectsCreateOrUpdate` + +```go +ctx := context.TODO() +id := standardoperation.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +payload := standardoperation.Project{ + // ... 
+} + + +read, err := client.ProjectsCreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.ProjectsDelete` + +```go +ctx := context.TODO() +id := standardoperation.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +read, err := client.ProjectsDelete(ctx, id, standardoperation.DefaultProjectsDeleteOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.ProjectsGet` + +```go +ctx := context.TODO() +id := standardoperation.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +read, err := client.ProjectsGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.ProjectsList` + +```go +ctx := context.TODO() +id := standardoperation.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +// alternatively `client.ProjectsList(ctx, id)` can be used to do batched pagination +items, err := client.ProjectsListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `StandardOperationClient.ProjectsUpdate` + +```go +ctx := context.TODO() +id := standardoperation.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +payload := standardoperation.Project{ + // ... 
+} + + +read, err := client.ProjectsUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.ResourceSkusListSkus` + +```go +ctx := context.TODO() +id := commonids.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.ResourceSkusListSkus(ctx, id)` can be used to do batched pagination +items, err := client.ResourceSkusListSkusComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `StandardOperationClient.ServiceTasksCreateOrUpdate` + +```go +ctx := context.TODO() +id := standardoperation.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +payload := standardoperation.ProjectTask{ + // ... +} + + +read, err := client.ServiceTasksCreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.ServiceTasksDelete` + +```go +ctx := context.TODO() +id := standardoperation.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +read, err := client.ServiceTasksDelete(ctx, id, standardoperation.DefaultServiceTasksDeleteOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.ServiceTasksGet` + +```go +ctx := context.TODO() +id := standardoperation.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +read, err := client.ServiceTasksGet(ctx, id, standardoperation.DefaultServiceTasksGetOperationOptions()) +if err != nil { + // 
handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.ServiceTasksList` + +```go +ctx := context.TODO() +id := standardoperation.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +// alternatively `client.ServiceTasksList(ctx, id, standardoperation.DefaultServiceTasksListOperationOptions())` can be used to do batched pagination +items, err := client.ServiceTasksListComplete(ctx, id, standardoperation.DefaultServiceTasksListOperationOptions()) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `StandardOperationClient.ServiceTasksUpdate` + +```go +ctx := context.TODO() +id := standardoperation.NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + +payload := standardoperation.ProjectTask{ + // ... +} + + +read, err := client.ServiceTasksUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.ServicesCheckNameAvailability` + +```go +ctx := context.TODO() +id := standardoperation.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +payload := standardoperation.NameAvailabilityRequest{ + // ... +} + + +read, err := client.ServicesCheckNameAvailability(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.ServicesCreateOrUpdate` + +```go +ctx := context.TODO() +id := standardoperation.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +payload := standardoperation.DataMigrationService{ + // ... 
+} + + +if err := client.ServicesCreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `StandardOperationClient.ServicesDelete` + +```go +ctx := context.TODO() +id := standardoperation.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +if err := client.ServicesDeleteThenPoll(ctx, id, standardoperation.DefaultServicesDeleteOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `StandardOperationClient.ServicesGet` + +```go +ctx := context.TODO() +id := standardoperation.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +read, err := client.ServicesGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.ServicesList` + +```go +ctx := context.TODO() +id := commonids.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.ServicesList(ctx, id)` can be used to do batched pagination +items, err := client.ServicesListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `StandardOperationClient.ServicesListByResourceGroup` + +```go +ctx := context.TODO() +id := standardoperation.NewSubscriptionResourceGroupID("12345678-1234-9876-4563-123456789012", "resourceGroupName") + +// alternatively `client.ServicesListByResourceGroup(ctx, id)` can be used to do batched pagination +items, err := client.ServicesListByResourceGroupComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `StandardOperationClient.ServicesListSkus` + +```go +ctx := context.TODO() +id := standardoperation.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +// 
alternatively `client.ServicesListSkus(ctx, id)` can be used to do batched pagination +items, err := client.ServicesListSkusComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `StandardOperationClient.ServicesUpdate` + +```go +ctx := context.TODO() +id := standardoperation.NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + +payload := standardoperation.DataMigrationService{ + // ... +} + + +if err := client.ServicesUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `StandardOperationClient.TasksCreateOrUpdate` + +```go +ctx := context.TODO() +id := standardoperation.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +payload := standardoperation.ProjectTask{ + // ... +} + + +read, err := client.TasksCreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.TasksDelete` + +```go +ctx := context.TODO() +id := standardoperation.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +read, err := client.TasksDelete(ctx, id, standardoperation.DefaultTasksDeleteOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.TasksGet` + +```go +ctx := context.TODO() +id := standardoperation.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +read, err := client.TasksGet(ctx, id, standardoperation.DefaultTasksGetOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { 
+ // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.TasksList` + +```go +ctx := context.TODO() +id := standardoperation.NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + +// alternatively `client.TasksList(ctx, id, standardoperation.DefaultTasksListOperationOptions())` can be used to do batched pagination +items, err := client.TasksListComplete(ctx, id, standardoperation.DefaultTasksListOperationOptions()) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `StandardOperationClient.TasksUpdate` + +```go +ctx := context.TODO() +id := standardoperation.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +payload := standardoperation.ProjectTask{ + // ... +} + + +read, err := client.TasksUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `StandardOperationClient.UsagesList` + +```go +ctx := context.TODO() +id := standardoperation.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +// alternatively `client.UsagesList(ctx, id)` can be used to do batched pagination +items, err := client.UsagesListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/client.go b/resource-manager/datamigration/2025-06-30/standardoperation/client.go new file mode 100644 index 00000000000..18dbdc555a7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/client.go @@ -0,0 +1,26 @@ +package standardoperation + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv 
"github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type StandardOperationClient struct { + Client *resourcemanager.Client +} + +func NewStandardOperationClientWithBaseURI(sdkApi sdkEnv.Api) (*StandardOperationClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "standardoperation", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating StandardOperationClient: %+v", err) + } + + return &StandardOperationClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/constants.go b/resource-manager/datamigration/2025-06-30/standardoperation/constants.go new file mode 100644 index 00000000000..12989bc6dc4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/constants.go @@ -0,0 +1,2522 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AuthenticationType string + +const ( + AuthenticationTypeActiveDirectoryIntegrated AuthenticationType = "ActiveDirectoryIntegrated" + AuthenticationTypeActiveDirectoryPassword AuthenticationType = "ActiveDirectoryPassword" + AuthenticationTypeNone AuthenticationType = "None" + AuthenticationTypeSqlAuthentication AuthenticationType = "SqlAuthentication" + AuthenticationTypeWindowsAuthentication AuthenticationType = "WindowsAuthentication" +) + +func PossibleValuesForAuthenticationType() []string { + return []string{ + string(AuthenticationTypeActiveDirectoryIntegrated), + string(AuthenticationTypeActiveDirectoryPassword), + string(AuthenticationTypeNone), + string(AuthenticationTypeSqlAuthentication), + string(AuthenticationTypeWindowsAuthentication), + } +} + +func (s *AuthenticationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthenticationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthenticationType(input string) (*AuthenticationType, error) { + vals := map[string]AuthenticationType{ + "activedirectoryintegrated": AuthenticationTypeActiveDirectoryIntegrated, + "activedirectorypassword": AuthenticationTypeActiveDirectoryPassword, + "none": AuthenticationTypeNone, + "sqlauthentication": AuthenticationTypeSqlAuthentication, + "windowsauthentication": AuthenticationTypeWindowsAuthentication, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthenticationType(input) + return &out, nil +} + +type BackupFileStatus string + +const ( + BackupFileStatusArrived BackupFileStatus = "Arrived" + BackupFileStatusCancelled BackupFileStatus = "Cancelled" + BackupFileStatusQueued BackupFileStatus = "Queued" + BackupFileStatusRestored 
BackupFileStatus = "Restored" + BackupFileStatusRestoring BackupFileStatus = "Restoring" + BackupFileStatusUploaded BackupFileStatus = "Uploaded" + BackupFileStatusUploading BackupFileStatus = "Uploading" +) + +func PossibleValuesForBackupFileStatus() []string { + return []string{ + string(BackupFileStatusArrived), + string(BackupFileStatusCancelled), + string(BackupFileStatusQueued), + string(BackupFileStatusRestored), + string(BackupFileStatusRestoring), + string(BackupFileStatusUploaded), + string(BackupFileStatusUploading), + } +} + +func (s *BackupFileStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupFileStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupFileStatus(input string) (*BackupFileStatus, error) { + vals := map[string]BackupFileStatus{ + "arrived": BackupFileStatusArrived, + "cancelled": BackupFileStatusCancelled, + "queued": BackupFileStatusQueued, + "restored": BackupFileStatusRestored, + "restoring": BackupFileStatusRestoring, + "uploaded": BackupFileStatusUploaded, + "uploading": BackupFileStatusUploading, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupFileStatus(input) + return &out, nil +} + +type BackupMode string + +const ( + BackupModeCreateBackup BackupMode = "CreateBackup" + BackupModeExistingBackup BackupMode = "ExistingBackup" +) + +func PossibleValuesForBackupMode() []string { + return []string{ + string(BackupModeCreateBackup), + string(BackupModeExistingBackup), + } +} + +func (s *BackupMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupMode(decoded) + 
if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupMode(input string) (*BackupMode, error) { + vals := map[string]BackupMode{ + "createbackup": BackupModeCreateBackup, + "existingbackup": BackupModeExistingBackup, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupMode(input) + return &out, nil +} + +type BackupType string + +const ( + BackupTypeDatabase BackupType = "Database" + BackupTypeDifferentialDatabase BackupType = "DifferentialDatabase" + BackupTypeDifferentialFile BackupType = "DifferentialFile" + BackupTypeDifferentialPartial BackupType = "DifferentialPartial" + BackupTypeFile BackupType = "File" + BackupTypePartial BackupType = "Partial" + BackupTypeTransactionLog BackupType = "TransactionLog" +) + +func PossibleValuesForBackupType() []string { + return []string{ + string(BackupTypeDatabase), + string(BackupTypeDifferentialDatabase), + string(BackupTypeDifferentialFile), + string(BackupTypeDifferentialPartial), + string(BackupTypeFile), + string(BackupTypePartial), + string(BackupTypeTransactionLog), + } +} + +func (s *BackupType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupType(input string) (*BackupType, error) { + vals := map[string]BackupType{ + "database": BackupTypeDatabase, + "differentialdatabase": BackupTypeDifferentialDatabase, + "differentialfile": BackupTypeDifferentialFile, + "differentialpartial": BackupTypeDifferentialPartial, + "file": BackupTypeFile, + "partial": BackupTypePartial, + "transactionlog": BackupTypeTransactionLog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return 
&v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := BackupType(input)
	return &out, nil
}

// parseEnumString resolves input against vals using a case-insensitive
// comparison; unrecognised values are passed through verbatim on a
// best-effort basis so values added to the API later do not break existing
// clients. The error return is always nil but is kept so the per-enum parse
// functions retain their historical signatures.
func parseEnumString[T ~string](input string, vals map[string]T) (*T, error) {
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := T(input)
	return &out, nil
}

// unmarshalEnumString decodes bytes as a JSON string, resolves it with
// parse and stores the result in target.
func unmarshalEnumString[T ~string](bytes []byte, parse func(string) (*T, error), target *T) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parse(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*target = *out
	return nil
}

// enumValueStrings converts a list of enum constants to their string forms,
// preserving the order in which they are given.
func enumValueStrings[T ~string](vals ...T) []string {
	out := make([]string, 0, len(vals))
	for _, v := range vals {
		out = append(out, string(v))
	}
	return out
}

// CommandState is a string enum; see PossibleValuesForCommandState for the
// values known to this SDK.
type CommandState string

const (
	CommandStateAccepted  CommandState = "Accepted"
	CommandStateFailed    CommandState = "Failed"
	CommandStateRunning   CommandState = "Running"
	CommandStateSucceeded CommandState = "Succeeded"
	CommandStateUnknown   CommandState = "Unknown"
)

// PossibleValuesForCommandState returns every known CommandState value.
func PossibleValuesForCommandState() []string {
	return enumValueStrings(CommandStateAccepted, CommandStateFailed, CommandStateRunning, CommandStateSucceeded, CommandStateUnknown)
}

// UnmarshalJSON implements json.Unmarshaler for CommandState.
func (s *CommandState) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseCommandState, s)
}

func parseCommandState(input string) (*CommandState, error) {
	return parseEnumString(input, map[string]CommandState{
		"accepted":  CommandStateAccepted,
		"failed":    CommandStateFailed,
		"running":   CommandStateRunning,
		"succeeded": CommandStateSucceeded,
		"unknown":   CommandStateUnknown,
	})
}

// CommandType is a string enum; see PossibleValuesForCommandType for the
// values known to this SDK.
type CommandType string

const (
	CommandTypeCancel                                              CommandType = "cancel"
	CommandTypeFinish                                              CommandType = "finish"
	CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete CommandType = "Migrate.SqlServer.AzureDbSqlMi.Complete"
	CommandTypeMigratePointSyncPointCompletePointDatabase          CommandType = "Migrate.Sync.Complete.Database"
	CommandTypeRestart                                             CommandType = "restart"
)

// PossibleValuesForCommandType returns every known CommandType value.
func PossibleValuesForCommandType() []string {
	return enumValueStrings(
		CommandTypeCancel,
		CommandTypeFinish,
		CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete,
		CommandTypeMigratePointSyncPointCompletePointDatabase,
		CommandTypeRestart,
	)
}

// UnmarshalJSON implements json.Unmarshaler for CommandType.
func (s *CommandType) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseCommandType, s)
}

func parseCommandType(input string) (*CommandType, error) {
	return parseEnumString(input, map[string]CommandType{
		"cancel": CommandTypeCancel,
		"finish": CommandTypeFinish,
		"migrate.sqlserver.azuredbsqlmi.complete": CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete,
		"migrate.sync.complete.database":          CommandTypeMigratePointSyncPointCompletePointDatabase,
		"restart": CommandTypeRestart,
	})
}

// DatabaseCompatLevel is a string enum; see PossibleValuesForDatabaseCompatLevel
// for the values known to this SDK.
type DatabaseCompatLevel string

const (
	DatabaseCompatLevelCompatLevelEightZero    DatabaseCompatLevel = "CompatLevel80"
	DatabaseCompatLevelCompatLevelNineZero     DatabaseCompatLevel = "CompatLevel90"
	DatabaseCompatLevelCompatLevelOneFourZero  DatabaseCompatLevel = "CompatLevel140"
	DatabaseCompatLevelCompatLevelOneHundred   DatabaseCompatLevel = "CompatLevel100"
	DatabaseCompatLevelCompatLevelOneOneZero   DatabaseCompatLevel = "CompatLevel110"
	DatabaseCompatLevelCompatLevelOneThreeZero DatabaseCompatLevel = "CompatLevel130"
	DatabaseCompatLevelCompatLevelOneTwoZero   DatabaseCompatLevel = "CompatLevel120"
)

// PossibleValuesForDatabaseCompatLevel returns every known DatabaseCompatLevel value.
func PossibleValuesForDatabaseCompatLevel() []string {
	return enumValueStrings(
		DatabaseCompatLevelCompatLevelEightZero,
		DatabaseCompatLevelCompatLevelNineZero,
		DatabaseCompatLevelCompatLevelOneFourZero,
		DatabaseCompatLevelCompatLevelOneHundred,
		DatabaseCompatLevelCompatLevelOneOneZero,
		DatabaseCompatLevelCompatLevelOneThreeZero,
		DatabaseCompatLevelCompatLevelOneTwoZero,
	)
}

// UnmarshalJSON implements json.Unmarshaler for DatabaseCompatLevel.
func (s *DatabaseCompatLevel) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseDatabaseCompatLevel, s)
}

func parseDatabaseCompatLevel(input string) (*DatabaseCompatLevel, error) {
	return parseEnumString(input, map[string]DatabaseCompatLevel{
		"compatlevel80":  DatabaseCompatLevelCompatLevelEightZero,
		"compatlevel90":  DatabaseCompatLevelCompatLevelNineZero,
		"compatlevel140": DatabaseCompatLevelCompatLevelOneFourZero,
		"compatlevel100": DatabaseCompatLevelCompatLevelOneHundred,
		"compatlevel110": DatabaseCompatLevelCompatLevelOneOneZero,
		"compatlevel130": DatabaseCompatLevelCompatLevelOneThreeZero,
		"compatlevel120": DatabaseCompatLevelCompatLevelOneTwoZero,
	})
}

// DatabaseFileType is a string enum; see PossibleValuesForDatabaseFileType
// for the values known to this SDK.
type DatabaseFileType string

const (
	DatabaseFileTypeFilestream   DatabaseFileType = "Filestream"
	DatabaseFileTypeFulltext     DatabaseFileType = "Fulltext"
	DatabaseFileTypeLog          DatabaseFileType = "Log"
	DatabaseFileTypeNotSupported DatabaseFileType = "NotSupported"
	DatabaseFileTypeRows         DatabaseFileType = "Rows"
)

// PossibleValuesForDatabaseFileType returns every known DatabaseFileType value.
func PossibleValuesForDatabaseFileType() []string {
	return enumValueStrings(DatabaseFileTypeFilestream, DatabaseFileTypeFulltext, DatabaseFileTypeLog, DatabaseFileTypeNotSupported, DatabaseFileTypeRows)
}

// UnmarshalJSON implements json.Unmarshaler for DatabaseFileType.
func (s *DatabaseFileType) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseDatabaseFileType, s)
}

func parseDatabaseFileType(input string) (*DatabaseFileType, error) {
	return parseEnumString(input, map[string]DatabaseFileType{
		"filestream":   DatabaseFileTypeFilestream,
		"fulltext":     DatabaseFileTypeFulltext,
		"log":          DatabaseFileTypeLog,
		"notsupported": DatabaseFileTypeNotSupported,
		"rows":         DatabaseFileTypeRows,
	})
}

// DatabaseMigrationStage is a string enum; see
// PossibleValuesForDatabaseMigrationStage for the values known to this SDK.
type DatabaseMigrationStage string

const (
	DatabaseMigrationStageBackup     DatabaseMigrationStage = "Backup"
	DatabaseMigrationStageCompleted  DatabaseMigrationStage = "Completed"
	DatabaseMigrationStageFileCopy   DatabaseMigrationStage = "FileCopy"
	DatabaseMigrationStageInitialize DatabaseMigrationStage = "Initialize"
	DatabaseMigrationStageNone       DatabaseMigrationStage = "None"
	DatabaseMigrationStageRestore    DatabaseMigrationStage = "Restore"
)

// PossibleValuesForDatabaseMigrationStage returns every known DatabaseMigrationStage value.
func PossibleValuesForDatabaseMigrationStage() []string {
	return enumValueStrings(
		DatabaseMigrationStageBackup,
		DatabaseMigrationStageCompleted,
		DatabaseMigrationStageFileCopy,
		DatabaseMigrationStageInitialize,
		DatabaseMigrationStageNone,
		DatabaseMigrationStageRestore,
	)
}

// UnmarshalJSON implements json.Unmarshaler for DatabaseMigrationStage.
func (s *DatabaseMigrationStage) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseDatabaseMigrationStage, s)
}

func parseDatabaseMigrationStage(input string) (*DatabaseMigrationStage, error) {
	return parseEnumString(input, map[string]DatabaseMigrationStage{
		"backup":     DatabaseMigrationStageBackup,
		"completed":  DatabaseMigrationStageCompleted,
		"filecopy":   DatabaseMigrationStageFileCopy,
		"initialize": DatabaseMigrationStageInitialize,
		"none":       DatabaseMigrationStageNone,
		"restore":    DatabaseMigrationStageRestore,
	})
}

// DatabaseMigrationState is a string enum; see
// PossibleValuesForDatabaseMigrationState for the values known to this SDK.
type DatabaseMigrationState string

const (
	DatabaseMigrationStateCANCELLED             DatabaseMigrationState = "CANCELLED"
	DatabaseMigrationStateCOMPLETED             DatabaseMigrationState = "COMPLETED"
	DatabaseMigrationStateCUTOVERSTART          DatabaseMigrationState = "CUTOVER_START"
	DatabaseMigrationStateFAILED                DatabaseMigrationState = "FAILED"
	DatabaseMigrationStateFULLBACKUPUPLOADSTART DatabaseMigrationState = "FULL_BACKUP_UPLOAD_START"
	DatabaseMigrationStateINITIAL               DatabaseMigrationState = "INITIAL"
	DatabaseMigrationStateLOGSHIPPINGSTART      DatabaseMigrationState = "LOG_SHIPPING_START"
	DatabaseMigrationStatePOSTCUTOVERCOMPLETE   DatabaseMigrationState = "POST_CUTOVER_COMPLETE"
	DatabaseMigrationStateUNDEFINED             DatabaseMigrationState = "UNDEFINED"
	DatabaseMigrationStateUPLOADLOGFILESSTART   DatabaseMigrationState = "UPLOAD_LOG_FILES_START"
)

// PossibleValuesForDatabaseMigrationState returns every known DatabaseMigrationState value.
func PossibleValuesForDatabaseMigrationState() []string {
	return enumValueStrings(
		DatabaseMigrationStateCANCELLED,
		DatabaseMigrationStateCOMPLETED,
		DatabaseMigrationStateCUTOVERSTART,
		DatabaseMigrationStateFAILED,
		DatabaseMigrationStateFULLBACKUPUPLOADSTART,
		DatabaseMigrationStateINITIAL,
		DatabaseMigrationStateLOGSHIPPINGSTART,
		DatabaseMigrationStatePOSTCUTOVERCOMPLETE,
		DatabaseMigrationStateUNDEFINED,
		DatabaseMigrationStateUPLOADLOGFILESSTART,
	)
}

// UnmarshalJSON implements json.Unmarshaler for DatabaseMigrationState.
func (s *DatabaseMigrationState) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseDatabaseMigrationState, s)
}

func parseDatabaseMigrationState(input string) (*DatabaseMigrationState, error) {
	return parseEnumString(input, map[string]DatabaseMigrationState{
		"cancelled":                DatabaseMigrationStateCANCELLED,
		"completed":                DatabaseMigrationStateCOMPLETED,
		"cutover_start":            DatabaseMigrationStateCUTOVERSTART,
		"failed":                   DatabaseMigrationStateFAILED,
		"full_backup_upload_start": DatabaseMigrationStateFULLBACKUPUPLOADSTART,
		"initial":                  DatabaseMigrationStateINITIAL,
		"log_shipping_start":       DatabaseMigrationStateLOGSHIPPINGSTART,
		"post_cutover_complete":    DatabaseMigrationStatePOSTCUTOVERCOMPLETE,
		"undefined":                DatabaseMigrationStateUNDEFINED,
		"upload_log_files_start":   DatabaseMigrationStateUPLOADLOGFILESSTART,
	})
}

// DatabaseState is a string enum; see PossibleValuesForDatabaseState for the
// values known to this SDK.
type DatabaseState string

const (
	DatabaseStateCopying          DatabaseState = "Copying"
	DatabaseStateEmergency        DatabaseState = "Emergency"
	DatabaseStateOffline          DatabaseState = "Offline"
	DatabaseStateOfflineSecondary DatabaseState = "OfflineSecondary"
	DatabaseStateOnline           DatabaseState = "Online"
	DatabaseStateRecovering       DatabaseState = "Recovering"
	DatabaseStateRecoveryPending  DatabaseState = "RecoveryPending"
	DatabaseStateRestoring        DatabaseState = "Restoring"
	DatabaseStateSuspect          DatabaseState = "Suspect"
)

// PossibleValuesForDatabaseState returns every known DatabaseState value.
func PossibleValuesForDatabaseState() []string {
	return enumValueStrings(
		DatabaseStateCopying,
		DatabaseStateEmergency,
		DatabaseStateOffline,
		DatabaseStateOfflineSecondary,
		DatabaseStateOnline,
		DatabaseStateRecovering,
		DatabaseStateRecoveryPending,
		DatabaseStateRestoring,
		DatabaseStateSuspect,
	)
}

// UnmarshalJSON implements json.Unmarshaler for DatabaseState.
func (s *DatabaseState) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseDatabaseState, s)
}

func parseDatabaseState(input string) (*DatabaseState, error) {
	return parseEnumString(input, map[string]DatabaseState{
		"copying":          DatabaseStateCopying,
		"emergency":        DatabaseStateEmergency,
		"offline":          DatabaseStateOffline,
		"offlinesecondary": DatabaseStateOfflineSecondary,
		"online":           DatabaseStateOnline,
		"recovering":       DatabaseStateRecovering,
		"recoverypending":  DatabaseStateRecoveryPending,
		"restoring":        DatabaseStateRestoring,
		"suspect":          DatabaseStateSuspect,
	})
}

// LoginMigrationStage is a string enum; see
// PossibleValuesForLoginMigrationStage for the values known to this SDK.
type LoginMigrationStage string

const (
	LoginMigrationStageAssignRoleMembership       LoginMigrationStage = "AssignRoleMembership"
	LoginMigrationStageAssignRoleOwnership        LoginMigrationStage = "AssignRoleOwnership"
	LoginMigrationStageCompleted                  LoginMigrationStage = "Completed"
	LoginMigrationStageEstablishObjectPermissions LoginMigrationStage = "EstablishObjectPermissions"
	LoginMigrationStageEstablishServerPermissions LoginMigrationStage = "EstablishServerPermissions"
	LoginMigrationStageEstablishUserMapping       LoginMigrationStage = "EstablishUserMapping"
	LoginMigrationStageInitialize                 LoginMigrationStage = "Initialize"
	LoginMigrationStageLoginMigration             LoginMigrationStage = "LoginMigration"
	LoginMigrationStageNone                       LoginMigrationStage = "None"
)

// PossibleValuesForLoginMigrationStage returns every known LoginMigrationStage value.
func PossibleValuesForLoginMigrationStage() []string {
	return enumValueStrings(
		LoginMigrationStageAssignRoleMembership,
		LoginMigrationStageAssignRoleOwnership,
		LoginMigrationStageCompleted,
		LoginMigrationStageEstablishObjectPermissions,
		LoginMigrationStageEstablishServerPermissions,
		LoginMigrationStageEstablishUserMapping,
		LoginMigrationStageInitialize,
		LoginMigrationStageLoginMigration,
		LoginMigrationStageNone,
	)
}

// UnmarshalJSON implements json.Unmarshaler for LoginMigrationStage.
func (s *LoginMigrationStage) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseLoginMigrationStage, s)
}

func parseLoginMigrationStage(input string) (*LoginMigrationStage, error) {
	return parseEnumString(input, map[string]LoginMigrationStage{
		"assignrolemembership":       LoginMigrationStageAssignRoleMembership,
		"assignroleownership":        LoginMigrationStageAssignRoleOwnership,
		"completed":                  LoginMigrationStageCompleted,
		"establishobjectpermissions": LoginMigrationStageEstablishObjectPermissions,
		"establishserverpermissions": LoginMigrationStageEstablishServerPermissions,
		"establishusermapping":       LoginMigrationStageEstablishUserMapping,
		"initialize":                 LoginMigrationStageInitialize,
		"loginmigration":             LoginMigrationStageLoginMigration,
		"none":                       LoginMigrationStageNone,
	})
}

// LoginType is a string enum; see PossibleValuesForLoginType for the values
// known to this SDK.
type LoginType string

const (
	LoginTypeAsymmetricKey LoginType = "AsymmetricKey"
	LoginTypeCertificate   LoginType = "Certificate"
	LoginTypeExternalGroup LoginType = "ExternalGroup"
	LoginTypeExternalUser  LoginType = "ExternalUser"
	LoginTypeSqlLogin      LoginType = "SqlLogin"
	LoginTypeWindowsGroup  LoginType = "WindowsGroup"
	LoginTypeWindowsUser   LoginType = "WindowsUser"
)

// PossibleValuesForLoginType returns every known LoginType value.
func PossibleValuesForLoginType() []string {
	return enumValueStrings(
		LoginTypeAsymmetricKey,
		LoginTypeCertificate,
		LoginTypeExternalGroup,
		LoginTypeExternalUser,
		LoginTypeSqlLogin,
		LoginTypeWindowsGroup,
		LoginTypeWindowsUser,
	)
}

// UnmarshalJSON implements json.Unmarshaler for LoginType.
func (s *LoginType) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseLoginType, s)
}

func parseLoginType(input string) (*LoginType, error) {
	return parseEnumString(input, map[string]LoginType{
		"asymmetrickey": LoginTypeAsymmetricKey,
		"certificate":   LoginTypeCertificate,
		"externalgroup": LoginTypeExternalGroup,
		"externaluser":  LoginTypeExternalUser,
		"sqllogin":      LoginTypeSqlLogin,
		"windowsgroup":  LoginTypeWindowsGroup,
		"windowsuser":   LoginTypeWindowsUser,
	})
}

// MigrationState is a string enum; see PossibleValuesForMigrationState for
// the values known to this SDK.
type MigrationState string

const (
	MigrationStateCompleted  MigrationState = "Completed"
	MigrationStateFailed     MigrationState = "Failed"
	MigrationStateInProgress MigrationState = "InProgress"
	MigrationStateNone       MigrationState = "None"
	MigrationStateSkipped    MigrationState = "Skipped"
	MigrationStateStopped    MigrationState = "Stopped"
	MigrationStateWarning    MigrationState = "Warning"
)

// PossibleValuesForMigrationState returns every known MigrationState value.
func PossibleValuesForMigrationState() []string {
	return enumValueStrings(
		MigrationStateCompleted,
		MigrationStateFailed,
		MigrationStateInProgress,
		MigrationStateNone,
		MigrationStateSkipped,
		MigrationStateStopped,
		MigrationStateWarning,
	)
}

// UnmarshalJSON implements json.Unmarshaler for MigrationState.
func (s *MigrationState) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseMigrationState, s)
}

func parseMigrationState(input string) (*MigrationState, error) {
	return parseEnumString(input, map[string]MigrationState{
		"completed":  MigrationStateCompleted,
		"failed":     MigrationStateFailed,
		"inprogress": MigrationStateInProgress,
		"none":       MigrationStateNone,
		"skipped":    MigrationStateSkipped,
		"stopped":    MigrationStateStopped,
		"warning":    MigrationStateWarning,
	})
}

// MigrationStatus is a string enum; see PossibleValuesForMigrationStatus for
// the values known to this SDK.
type MigrationStatus string

const (
	MigrationStatusCompleted               MigrationStatus = "Completed"
	MigrationStatusCompletedWithWarnings   MigrationStatus = "CompletedWithWarnings"
	MigrationStatusConfigured              MigrationStatus = "Configured"
	MigrationStatusConnecting              MigrationStatus = "Connecting"
	MigrationStatusDefault                 MigrationStatus = "Default"
	MigrationStatusError                   MigrationStatus = "Error"
	MigrationStatusRunning                 MigrationStatus = "Running"
	MigrationStatusSelectLogins            MigrationStatus = "SelectLogins"
	MigrationStatusSourceAndTargetSelected MigrationStatus = "SourceAndTargetSelected"
	MigrationStatusStopped                 MigrationStatus = "Stopped"
)

// PossibleValuesForMigrationStatus returns every known MigrationStatus value.
func PossibleValuesForMigrationStatus() []string {
	return enumValueStrings(
		MigrationStatusCompleted,
		MigrationStatusCompletedWithWarnings,
		MigrationStatusConfigured,
		MigrationStatusConnecting,
		MigrationStatusDefault,
		MigrationStatusError,
		MigrationStatusRunning,
		MigrationStatusSelectLogins,
		MigrationStatusSourceAndTargetSelected,
		MigrationStatusStopped,
	)
}

// UnmarshalJSON implements json.Unmarshaler for MigrationStatus.
func (s *MigrationStatus) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseMigrationStatus, s)
}

func parseMigrationStatus(input string) (*MigrationStatus, error) {
	return parseEnumString(input, map[string]MigrationStatus{
		"completed":               MigrationStatusCompleted,
		"completedwithwarnings":   MigrationStatusCompletedWithWarnings,
		"configured":              MigrationStatusConfigured,
		"connecting":              MigrationStatusConnecting,
		"default":                 MigrationStatusDefault,
		"error":                   MigrationStatusError,
		"running":                 MigrationStatusRunning,
		"selectlogins":            MigrationStatusSelectLogins,
		"sourceandtargetselected": MigrationStatusSourceAndTargetSelected,
		"stopped":                 MigrationStatusStopped,
	})
}

// MongoDbClusterType is a string enum; see PossibleValuesForMongoDbClusterType
// for the values known to this SDK.
type MongoDbClusterType string

const (
	MongoDbClusterTypeBlobContainer MongoDbClusterType = "BlobContainer"
	MongoDbClusterTypeCosmosDb      MongoDbClusterType = "CosmosDb"
	MongoDbClusterTypeMongoDb       MongoDbClusterType = "MongoDb"
)

// PossibleValuesForMongoDbClusterType returns every known MongoDbClusterType value.
func PossibleValuesForMongoDbClusterType() []string {
	return enumValueStrings(MongoDbClusterTypeBlobContainer, MongoDbClusterTypeCosmosDb, MongoDbClusterTypeMongoDb)
}

// UnmarshalJSON implements json.Unmarshaler for MongoDbClusterType.
func (s *MongoDbClusterType) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseMongoDbClusterType, s)
}

func parseMongoDbClusterType(input string) (*MongoDbClusterType, error) {
	return parseEnumString(input, map[string]MongoDbClusterType{
		"blobcontainer": MongoDbClusterTypeBlobContainer,
		"cosmosdb":      MongoDbClusterTypeCosmosDb,
		"mongodb":       MongoDbClusterTypeMongoDb,
	})
}

// MongoDbErrorType is a string enum; see PossibleValuesForMongoDbErrorType
// for the values known to this SDK.
type MongoDbErrorType string

const (
	MongoDbErrorTypeError           MongoDbErrorType = "Error"
	MongoDbErrorTypeValidationError MongoDbErrorType = "ValidationError"
	MongoDbErrorTypeWarning         MongoDbErrorType = "Warning"
)

// PossibleValuesForMongoDbErrorType returns every known MongoDbErrorType value.
func PossibleValuesForMongoDbErrorType() []string {
	return enumValueStrings(MongoDbErrorTypeError, MongoDbErrorTypeValidationError, MongoDbErrorTypeWarning)
}

// UnmarshalJSON implements json.Unmarshaler for MongoDbErrorType.
func (s *MongoDbErrorType) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseMongoDbErrorType, s)
}

func parseMongoDbErrorType(input string) (*MongoDbErrorType, error) {
	return parseEnumString(input, map[string]MongoDbErrorType{
		"error":           MongoDbErrorTypeError,
		"validationerror": MongoDbErrorTypeValidationError,
		"warning":         MongoDbErrorTypeWarning,
	})
}

// MongoDbMigrationState is a string enum; see
// PossibleValuesForMongoDbMigrationState for the values known to this SDK.
type MongoDbMigrationState string

const (
	MongoDbMigrationStateCanceled        MongoDbMigrationState = "Canceled"
	MongoDbMigrationStateComplete        MongoDbMigrationState = "Complete"
	MongoDbMigrationStateCopying         MongoDbMigrationState = "Copying"
	MongoDbMigrationStateFailed          MongoDbMigrationState = "Failed"
	MongoDbMigrationStateFinalizing      MongoDbMigrationState = "Finalizing"
	MongoDbMigrationStateInitialReplay   MongoDbMigrationState = "InitialReplay"
	MongoDbMigrationStateInitializing    MongoDbMigrationState = "Initializing"
	MongoDbMigrationStateNotStarted      MongoDbMigrationState = "NotStarted"
	MongoDbMigrationStateReplaying       MongoDbMigrationState = "Replaying"
	MongoDbMigrationStateRestarting      MongoDbMigrationState = "Restarting"
	MongoDbMigrationStateValidatingInput MongoDbMigrationState = "ValidatingInput"
)

// PossibleValuesForMongoDbMigrationState returns every known MongoDbMigrationState value.
func PossibleValuesForMongoDbMigrationState() []string {
	return enumValueStrings(
		MongoDbMigrationStateCanceled,
		MongoDbMigrationStateComplete,
		MongoDbMigrationStateCopying,
		MongoDbMigrationStateFailed,
		MongoDbMigrationStateFinalizing,
		MongoDbMigrationStateInitialReplay,
		MongoDbMigrationStateInitializing,
		MongoDbMigrationStateNotStarted,
		MongoDbMigrationStateReplaying,
		MongoDbMigrationStateRestarting,
		MongoDbMigrationStateValidatingInput,
	)
}

// UnmarshalJSON implements json.Unmarshaler for MongoDbMigrationState.
func (s *MongoDbMigrationState) UnmarshalJSON(bytes []byte) error {
	return unmarshalEnumString(bytes, parseMongoDbMigrationState, s)
}

func parseMongoDbMigrationState(input string) (*MongoDbMigrationState, error) {
	return parseEnumString(input, map[string]MongoDbMigrationState{
		"canceled":        MongoDbMigrationStateCanceled,
		"complete":        MongoDbMigrationStateComplete,
		"copying":         MongoDbMigrationStateCopying,
		"failed":          MongoDbMigrationStateFailed,
		"finalizing":      MongoDbMigrationStateFinalizing,
		"initialreplay":   MongoDbMigrationStateInitialReplay,
		"initializing":    MongoDbMigrationStateInitializing,
		"notstarted":      MongoDbMigrationStateNotStarted,
		"replaying":       MongoDbMigrationStateReplaying,
		"restarting":      MongoDbMigrationStateRestarting,
		"validatinginput": MongoDbMigrationStateValidatingInput,
	})
}
// lookupEnumValue resolves input against vals using a case-insensitive
// comparison; unrecognised values are passed through verbatim on a
// best-effort basis so values added to the API later do not break existing
// clients. The error return is always nil but is kept so the per-enum parse
// functions retain their historical signatures.
func lookupEnumValue[T ~string](input string, vals map[string]T) (*T, error) {
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := T(input)
	return &out, nil
}

// decodeEnumJSON decodes bytes as a JSON string, resolves it with parse and
// stores the result in target.
func decodeEnumJSON[T ~string](bytes []byte, parse func(string) (*T, error), target *T) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parse(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*target = *out
	return nil
}

// stringsOfEnum converts a list of enum constants to their string forms,
// preserving the order in which they are given.
func stringsOfEnum[T ~string](vals ...T) []string {
	out := make([]string, 0, len(vals))
	for _, v := range vals {
		out = append(out, string(v))
	}
	return out
}

// MongoDbReplication is a string enum; see PossibleValuesForMongoDbReplication
// for the values known to this SDK.
type MongoDbReplication string

const (
	MongoDbReplicationContinuous MongoDbReplication = "Continuous"
	MongoDbReplicationDisabled   MongoDbReplication = "Disabled"
	MongoDbReplicationOneTime    MongoDbReplication = "OneTime"
)

// PossibleValuesForMongoDbReplication returns every known MongoDbReplication value.
func PossibleValuesForMongoDbReplication() []string {
	return stringsOfEnum(MongoDbReplicationContinuous, MongoDbReplicationDisabled, MongoDbReplicationOneTime)
}

// UnmarshalJSON implements json.Unmarshaler for MongoDbReplication.
func (s *MongoDbReplication) UnmarshalJSON(bytes []byte) error {
	return decodeEnumJSON(bytes, parseMongoDbReplication, s)
}

func parseMongoDbReplication(input string) (*MongoDbReplication, error) {
	return lookupEnumValue(input, map[string]MongoDbReplication{
		"continuous": MongoDbReplicationContinuous,
		"disabled":   MongoDbReplicationDisabled,
		"onetime":    MongoDbReplicationOneTime,
	})
}

// MongoDbShardKeyOrder is a string enum; see
// PossibleValuesForMongoDbShardKeyOrder for the values known to this SDK.
type MongoDbShardKeyOrder string

const (
	MongoDbShardKeyOrderForward MongoDbShardKeyOrder = "Forward"
	MongoDbShardKeyOrderHashed  MongoDbShardKeyOrder = "Hashed"
	MongoDbShardKeyOrderReverse MongoDbShardKeyOrder = "Reverse"
)

// PossibleValuesForMongoDbShardKeyOrder returns every known MongoDbShardKeyOrder value.
func PossibleValuesForMongoDbShardKeyOrder() []string {
	return stringsOfEnum(MongoDbShardKeyOrderForward, MongoDbShardKeyOrderHashed, MongoDbShardKeyOrderReverse)
}

// UnmarshalJSON implements json.Unmarshaler for MongoDbShardKeyOrder.
func (s *MongoDbShardKeyOrder) UnmarshalJSON(bytes []byte) error {
	return decodeEnumJSON(bytes, parseMongoDbShardKeyOrder, s)
}

func parseMongoDbShardKeyOrder(input string) (*MongoDbShardKeyOrder, error) {
	return lookupEnumValue(input, map[string]MongoDbShardKeyOrder{
		"forward": MongoDbShardKeyOrderForward,
		"hashed":  MongoDbShardKeyOrderHashed,
		"reverse": MongoDbShardKeyOrderReverse,
	})
}

// MySqlTargetPlatformType is a string enum; see
// PossibleValuesForMySqlTargetPlatformType for the values known to this SDK.
type MySqlTargetPlatformType string

const (
	MySqlTargetPlatformTypeAzureDbForMySQL MySqlTargetPlatformType = "AzureDbForMySQL"
	MySqlTargetPlatformTypeSqlServer       MySqlTargetPlatformType = "SqlServer"
)

// PossibleValuesForMySqlTargetPlatformType returns every known MySqlTargetPlatformType value.
func PossibleValuesForMySqlTargetPlatformType() []string {
	return stringsOfEnum(MySqlTargetPlatformTypeAzureDbForMySQL, MySqlTargetPlatformTypeSqlServer)
}

// UnmarshalJSON implements json.Unmarshaler for MySqlTargetPlatformType.
func (s *MySqlTargetPlatformType) UnmarshalJSON(bytes []byte) error {
	return decodeEnumJSON(bytes, parseMySqlTargetPlatformType, s)
}

func parseMySqlTargetPlatformType(input string) (*MySqlTargetPlatformType, error) {
	return lookupEnumValue(input, map[string]MySqlTargetPlatformType{
		"azuredbformysql": MySqlTargetPlatformTypeAzureDbForMySQL,
		"sqlserver":       MySqlTargetPlatformTypeSqlServer,
	})
}

// NameCheckFailureReason is a string enum; see
// PossibleValuesForNameCheckFailureReason for the values known to this SDK.
type NameCheckFailureReason string

const (
	NameCheckFailureReasonAlreadyExists NameCheckFailureReason = "AlreadyExists"
	NameCheckFailureReasonInvalid       NameCheckFailureReason = "Invalid"
)

// PossibleValuesForNameCheckFailureReason returns every known NameCheckFailureReason value.
func PossibleValuesForNameCheckFailureReason() []string {
	return stringsOfEnum(NameCheckFailureReasonAlreadyExists, NameCheckFailureReasonInvalid)
}

// UnmarshalJSON implements json.Unmarshaler for NameCheckFailureReason.
func (s *NameCheckFailureReason) UnmarshalJSON(bytes []byte) error {
	return decodeEnumJSON(bytes, parseNameCheckFailureReason, s)
}

func parseNameCheckFailureReason(input string) (*NameCheckFailureReason, error) {
	return lookupEnumValue(input, map[string]NameCheckFailureReason{
		"alreadyexists": NameCheckFailureReasonAlreadyExists,
		"invalid":       NameCheckFailureReasonInvalid,
	})
}

// ObjectType is a string enum; see PossibleValuesForObjectType for the values
// known to this SDK.
type ObjectType string

const (
	ObjectTypeFunction         ObjectType = "Function"
	ObjectTypeStoredProcedures ObjectType = "StoredProcedures"
	ObjectTypeTable            ObjectType = "Table"
	ObjectTypeUser             ObjectType = "User"
	ObjectTypeView             ObjectType = "View"
)

// PossibleValuesForObjectType returns every known ObjectType value.
func PossibleValuesForObjectType() []string {
	return stringsOfEnum(ObjectTypeFunction, ObjectTypeStoredProcedures, ObjectTypeTable, ObjectTypeUser, ObjectTypeView)
}

// UnmarshalJSON implements json.Unmarshaler for ObjectType.
func (s *ObjectType) UnmarshalJSON(bytes []byte) error {
	return decodeEnumJSON(bytes, parseObjectType, s)
}

func parseObjectType(input string) (*ObjectType, error) {
	return lookupEnumValue(input, map[string]ObjectType{
		"function":         ObjectTypeFunction,
		"storedprocedures": ObjectTypeStoredProcedures,
		"table":            ObjectTypeTable,
		"user":             ObjectTypeUser,
		"view":             ObjectTypeView,
	})
}

// ProjectProvisioningState is a string enum; see
// PossibleValuesForProjectProvisioningState for the values known to this SDK.
type ProjectProvisioningState string

const (
	ProjectProvisioningStateDeleting  ProjectProvisioningState = "Deleting"
	ProjectProvisioningStateSucceeded ProjectProvisioningState = "Succeeded"
)

// PossibleValuesForProjectProvisioningState returns every known ProjectProvisioningState value.
func PossibleValuesForProjectProvisioningState() []string {
	return stringsOfEnum(ProjectProvisioningStateDeleting, ProjectProvisioningStateSucceeded)
}

// UnmarshalJSON implements json.Unmarshaler for ProjectProvisioningState.
func (s *ProjectProvisioningState) UnmarshalJSON(bytes []byte) error {
	return decodeEnumJSON(bytes, parseProjectProvisioningState, s)
}

func parseProjectProvisioningState(input string) (*ProjectProvisioningState, error) {
	return lookupEnumValue(input, map[string]ProjectProvisioningState{
		"deleting":  ProjectProvisioningStateDeleting,
		"succeeded": ProjectProvisioningStateSucceeded,
	})
}

// ProjectSourcePlatform is a string enum; see
// PossibleValuesForProjectSourcePlatform for the values known to this SDK.
type ProjectSourcePlatform string

const (
	ProjectSourcePlatformMongoDb    ProjectSourcePlatform = "MongoDb"
	ProjectSourcePlatformMySQL      ProjectSourcePlatform = "MySQL"
	ProjectSourcePlatformPostgreSql ProjectSourcePlatform = "PostgreSql"
	ProjectSourcePlatformSQL        ProjectSourcePlatform = "SQL"
	ProjectSourcePlatformUnknown    ProjectSourcePlatform = "Unknown"
)

// PossibleValuesForProjectSourcePlatform returns every known ProjectSourcePlatform value.
func PossibleValuesForProjectSourcePlatform() []string {
	return stringsOfEnum(
		ProjectSourcePlatformMongoDb,
		ProjectSourcePlatformMySQL,
		ProjectSourcePlatformPostgreSql,
		ProjectSourcePlatformSQL,
		ProjectSourcePlatformUnknown,
	)
}

// UnmarshalJSON implements json.Unmarshaler for ProjectSourcePlatform.
func (s *ProjectSourcePlatform) UnmarshalJSON(bytes []byte) error {
	return decodeEnumJSON(bytes, parseProjectSourcePlatform, s)
}

func parseProjectSourcePlatform(input string) (*ProjectSourcePlatform, error) {
	return lookupEnumValue(input, map[string]ProjectSourcePlatform{
		"mongodb":    ProjectSourcePlatformMongoDb,
		"mysql":      ProjectSourcePlatformMySQL,
		"postgresql": ProjectSourcePlatformPostgreSql,
		"sql":        ProjectSourcePlatformSQL,
		"unknown":    ProjectSourcePlatformUnknown,
	})
}

// ProjectTargetPlatform is a string enum; see
// PossibleValuesForProjectTargetPlatform for the values known to this SDK.
type ProjectTargetPlatform string

const (
	ProjectTargetPlatformAzureDbForMySql      ProjectTargetPlatform = "AzureDbForMySql"
	ProjectTargetPlatformAzureDbForPostgreSql ProjectTargetPlatform = "AzureDbForPostgreSql"
	ProjectTargetPlatformMongoDb              ProjectTargetPlatform = "MongoDb"
	ProjectTargetPlatformSQLDB                ProjectTargetPlatform = "SQLDB"
	ProjectTargetPlatformSQLMI                ProjectTargetPlatform = "SQLMI"
	ProjectTargetPlatformUnknown              ProjectTargetPlatform = "Unknown"
)

// PossibleValuesForProjectTargetPlatform returns every known ProjectTargetPlatform value.
func PossibleValuesForProjectTargetPlatform() []string {
	return stringsOfEnum(
		ProjectTargetPlatformAzureDbForMySql,
		ProjectTargetPlatformAzureDbForPostgreSql,
		ProjectTargetPlatformMongoDb,
		ProjectTargetPlatformSQLDB,
		ProjectTargetPlatformSQLMI,
		ProjectTargetPlatformUnknown,
	)
}

// UnmarshalJSON implements json.Unmarshaler for ProjectTargetPlatform. The
// lookup mirrors parseProjectTargetPlatform (defined further down in this
// file): known values are matched case-insensitively and anything else is
// accepted verbatim, so parsing never fails for a valid JSON string.
func (s *ProjectTargetPlatform) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	known := map[string]ProjectTargetPlatform{
		"azuredbformysql":      ProjectTargetPlatformAzureDbForMySql,
		"azuredbforpostgresql": ProjectTargetPlatformAzureDbForPostgreSql,
		"mongodb":              ProjectTargetPlatformMongoDb,
		"sqldb":                ProjectTargetPlatformSQLDB,
		"sqlmi":                ProjectTargetPlatformSQLMI,
		"unknown":              ProjectTargetPlatformUnknown,
	}
	if v, ok := known[strings.ToLower(decoded)]; ok {
		*s = v
		return nil
	}

	// otherwise presume it's an undefined value and best-effort it
	*s = ProjectTargetPlatform(decoded)
	return nil
}
+func parseProjectTargetPlatform(input string) (*ProjectTargetPlatform, error) { + vals := map[string]ProjectTargetPlatform{ + "azuredbformysql": ProjectTargetPlatformAzureDbForMySql, + "azuredbforpostgresql": ProjectTargetPlatformAzureDbForPostgreSql, + "mongodb": ProjectTargetPlatformMongoDb, + "sqldb": ProjectTargetPlatformSQLDB, + "sqlmi": ProjectTargetPlatformSQLMI, + "unknown": ProjectTargetPlatformUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProjectTargetPlatform(input) + return &out, nil +} + +type ReplicateMigrationState string + +const ( + ReplicateMigrationStateACTIONREQUIRED ReplicateMigrationState = "ACTION_REQUIRED" + ReplicateMigrationStateCOMPLETE ReplicateMigrationState = "COMPLETE" + ReplicateMigrationStateFAILED ReplicateMigrationState = "FAILED" + ReplicateMigrationStatePENDING ReplicateMigrationState = "PENDING" + ReplicateMigrationStateUNDEFINED ReplicateMigrationState = "UNDEFINED" + ReplicateMigrationStateVALIDATING ReplicateMigrationState = "VALIDATING" +) + +func PossibleValuesForReplicateMigrationState() []string { + return []string{ + string(ReplicateMigrationStateACTIONREQUIRED), + string(ReplicateMigrationStateCOMPLETE), + string(ReplicateMigrationStateFAILED), + string(ReplicateMigrationStatePENDING), + string(ReplicateMigrationStateUNDEFINED), + string(ReplicateMigrationStateVALIDATING), + } +} + +func (s *ReplicateMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseReplicateMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicateMigrationState(input string) (*ReplicateMigrationState, error) { + vals := map[string]ReplicateMigrationState{ + "action_required": 
ReplicateMigrationStateACTIONREQUIRED, + "complete": ReplicateMigrationStateCOMPLETE, + "failed": ReplicateMigrationStateFAILED, + "pending": ReplicateMigrationStatePENDING, + "undefined": ReplicateMigrationStateUNDEFINED, + "validating": ReplicateMigrationStateVALIDATING, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicateMigrationState(input) + return &out, nil +} + +type ResourceSkuCapacityScaleType string + +const ( + ResourceSkuCapacityScaleTypeAutomatic ResourceSkuCapacityScaleType = "Automatic" + ResourceSkuCapacityScaleTypeManual ResourceSkuCapacityScaleType = "Manual" + ResourceSkuCapacityScaleTypeNone ResourceSkuCapacityScaleType = "None" +) + +func PossibleValuesForResourceSkuCapacityScaleType() []string { + return []string{ + string(ResourceSkuCapacityScaleTypeAutomatic), + string(ResourceSkuCapacityScaleTypeManual), + string(ResourceSkuCapacityScaleTypeNone), + } +} + +func (s *ResourceSkuCapacityScaleType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResourceSkuCapacityScaleType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResourceSkuCapacityScaleType(input string) (*ResourceSkuCapacityScaleType, error) { + vals := map[string]ResourceSkuCapacityScaleType{ + "automatic": ResourceSkuCapacityScaleTypeAutomatic, + "manual": ResourceSkuCapacityScaleTypeManual, + "none": ResourceSkuCapacityScaleTypeNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResourceSkuCapacityScaleType(input) + return &out, nil +} + +type ResourceSkuRestrictionsReasonCode string + +const ( + ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription 
ResourceSkuRestrictionsReasonCode = "NotAvailableForSubscription" + ResourceSkuRestrictionsReasonCodeQuotaId ResourceSkuRestrictionsReasonCode = "QuotaId" +) + +func PossibleValuesForResourceSkuRestrictionsReasonCode() []string { + return []string{ + string(ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription), + string(ResourceSkuRestrictionsReasonCodeQuotaId), + } +} + +func (s *ResourceSkuRestrictionsReasonCode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResourceSkuRestrictionsReasonCode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResourceSkuRestrictionsReasonCode(input string) (*ResourceSkuRestrictionsReasonCode, error) { + vals := map[string]ResourceSkuRestrictionsReasonCode{ + "notavailableforsubscription": ResourceSkuRestrictionsReasonCodeNotAvailableForSubscription, + "quotaid": ResourceSkuRestrictionsReasonCodeQuotaId, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResourceSkuRestrictionsReasonCode(input) + return &out, nil +} + +type ResourceSkuRestrictionsType string + +const ( + ResourceSkuRestrictionsTypeLocation ResourceSkuRestrictionsType = "location" +) + +func PossibleValuesForResourceSkuRestrictionsType() []string { + return []string{ + string(ResourceSkuRestrictionsTypeLocation), + } +} + +func (s *ResourceSkuRestrictionsType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResourceSkuRestrictionsType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResourceSkuRestrictionsType(input string) 
(*ResourceSkuRestrictionsType, error) { + vals := map[string]ResourceSkuRestrictionsType{ + "location": ResourceSkuRestrictionsTypeLocation, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResourceSkuRestrictionsType(input) + return &out, nil +} + +type ResultType string + +const ( + ResultTypeCollection ResultType = "Collection" + ResultTypeDatabase ResultType = "Database" + ResultTypeMigration ResultType = "Migration" +) + +func PossibleValuesForResultType() []string { + return []string{ + string(ResultTypeCollection), + string(ResultTypeDatabase), + string(ResultTypeMigration), + } +} + +func (s *ResultType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResultType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResultType(input string) (*ResultType, error) { + vals := map[string]ResultType{ + "collection": ResultTypeCollection, + "database": ResultTypeDatabase, + "migration": ResultTypeMigration, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResultType(input) + return &out, nil +} + +type ScenarioSource string + +const ( + ScenarioSourceAccess ScenarioSource = "Access" + ScenarioSourceDBTwo ScenarioSource = "DB2" + ScenarioSourceMongoDB ScenarioSource = "MongoDB" + ScenarioSourceMySQL ScenarioSource = "MySQL" + ScenarioSourceMySQLRDS ScenarioSource = "MySQLRDS" + ScenarioSourceOracle ScenarioSource = "Oracle" + ScenarioSourcePostgreSQL ScenarioSource = "PostgreSQL" + ScenarioSourcePostgreSQLRDS ScenarioSource = "PostgreSQLRDS" + ScenarioSourceSQL ScenarioSource = "SQL" + ScenarioSourceSQLRDS ScenarioSource = "SQLRDS" + ScenarioSourceSybase 
ScenarioSource = "Sybase" +) + +func PossibleValuesForScenarioSource() []string { + return []string{ + string(ScenarioSourceAccess), + string(ScenarioSourceDBTwo), + string(ScenarioSourceMongoDB), + string(ScenarioSourceMySQL), + string(ScenarioSourceMySQLRDS), + string(ScenarioSourceOracle), + string(ScenarioSourcePostgreSQL), + string(ScenarioSourcePostgreSQLRDS), + string(ScenarioSourceSQL), + string(ScenarioSourceSQLRDS), + string(ScenarioSourceSybase), + } +} + +func (s *ScenarioSource) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioSource(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioSource(input string) (*ScenarioSource, error) { + vals := map[string]ScenarioSource{ + "access": ScenarioSourceAccess, + "db2": ScenarioSourceDBTwo, + "mongodb": ScenarioSourceMongoDB, + "mysql": ScenarioSourceMySQL, + "mysqlrds": ScenarioSourceMySQLRDS, + "oracle": ScenarioSourceOracle, + "postgresql": ScenarioSourcePostgreSQL, + "postgresqlrds": ScenarioSourcePostgreSQLRDS, + "sql": ScenarioSourceSQL, + "sqlrds": ScenarioSourceSQLRDS, + "sybase": ScenarioSourceSybase, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioSource(input) + return &out, nil +} + +type ScenarioTarget string + +const ( + ScenarioTargetAzureDBForMySql ScenarioTarget = "AzureDBForMySql" + ScenarioTargetAzureDBForPostgresSQL ScenarioTarget = "AzureDBForPostgresSQL" + ScenarioTargetMongoDB ScenarioTarget = "MongoDB" + ScenarioTargetSQLDB ScenarioTarget = "SQLDB" + ScenarioTargetSQLDW ScenarioTarget = "SQLDW" + ScenarioTargetSQLMI ScenarioTarget = "SQLMI" + ScenarioTargetSQLServer ScenarioTarget = "SQLServer" +) + +func PossibleValuesForScenarioTarget() []string { + return 
[]string{ + string(ScenarioTargetAzureDBForMySql), + string(ScenarioTargetAzureDBForPostgresSQL), + string(ScenarioTargetMongoDB), + string(ScenarioTargetSQLDB), + string(ScenarioTargetSQLDW), + string(ScenarioTargetSQLMI), + string(ScenarioTargetSQLServer), + } +} + +func (s *ScenarioTarget) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioTarget(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioTarget(input string) (*ScenarioTarget, error) { + vals := map[string]ScenarioTarget{ + "azuredbformysql": ScenarioTargetAzureDBForMySql, + "azuredbforpostgressql": ScenarioTargetAzureDBForPostgresSQL, + "mongodb": ScenarioTargetMongoDB, + "sqldb": ScenarioTargetSQLDB, + "sqldw": ScenarioTargetSQLDW, + "sqlmi": ScenarioTargetSQLMI, + "sqlserver": ScenarioTargetSQLServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioTarget(input) + return &out, nil +} + +type ServerLevelPermissionsGroup string + +const ( + ServerLevelPermissionsGroupDefault ServerLevelPermissionsGroup = "Default" + ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL ServerLevelPermissionsGroup = "MigrationFromMySQLToAzureDBForMySQL" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureDB" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureMI" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureVM" +) + +func PossibleValuesForServerLevelPermissionsGroup() []string { + return []string{ + string(ServerLevelPermissionsGroupDefault), + 
string(ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM), + } +} + +func (s *ServerLevelPermissionsGroup) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServerLevelPermissionsGroup(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServerLevelPermissionsGroup(input string) (*ServerLevelPermissionsGroup, error) { + vals := map[string]ServerLevelPermissionsGroup{ + "default": ServerLevelPermissionsGroupDefault, + "migrationfrommysqltoazuredbformysql": ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL, + "migrationfromsqlservertoazuredb": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB, + "migrationfromsqlservertoazuremi": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI, + "migrationfromsqlservertoazurevm": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServerLevelPermissionsGroup(input) + return &out, nil +} + +type ServiceProvisioningState string + +const ( + ServiceProvisioningStateAccepted ServiceProvisioningState = "Accepted" + ServiceProvisioningStateDeleting ServiceProvisioningState = "Deleting" + ServiceProvisioningStateDeploying ServiceProvisioningState = "Deploying" + ServiceProvisioningStateFailed ServiceProvisioningState = "Failed" + ServiceProvisioningStateFailedToStart ServiceProvisioningState = "FailedToStart" + ServiceProvisioningStateFailedToStop ServiceProvisioningState = "FailedToStop" + ServiceProvisioningStateStarting 
ServiceProvisioningState = "Starting" + ServiceProvisioningStateStopped ServiceProvisioningState = "Stopped" + ServiceProvisioningStateStopping ServiceProvisioningState = "Stopping" + ServiceProvisioningStateSucceeded ServiceProvisioningState = "Succeeded" +) + +func PossibleValuesForServiceProvisioningState() []string { + return []string{ + string(ServiceProvisioningStateAccepted), + string(ServiceProvisioningStateDeleting), + string(ServiceProvisioningStateDeploying), + string(ServiceProvisioningStateFailed), + string(ServiceProvisioningStateFailedToStart), + string(ServiceProvisioningStateFailedToStop), + string(ServiceProvisioningStateStarting), + string(ServiceProvisioningStateStopped), + string(ServiceProvisioningStateStopping), + string(ServiceProvisioningStateSucceeded), + } +} + +func (s *ServiceProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServiceProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServiceProvisioningState(input string) (*ServiceProvisioningState, error) { + vals := map[string]ServiceProvisioningState{ + "accepted": ServiceProvisioningStateAccepted, + "deleting": ServiceProvisioningStateDeleting, + "deploying": ServiceProvisioningStateDeploying, + "failed": ServiceProvisioningStateFailed, + "failedtostart": ServiceProvisioningStateFailedToStart, + "failedtostop": ServiceProvisioningStateFailedToStop, + "starting": ServiceProvisioningStateStarting, + "stopped": ServiceProvisioningStateStopped, + "stopping": ServiceProvisioningStateStopping, + "succeeded": ServiceProvisioningStateSucceeded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServiceProvisioningState(input) + return &out, nil +} + 
+type ServiceScalability string + +const ( + ServiceScalabilityAutomatic ServiceScalability = "automatic" + ServiceScalabilityManual ServiceScalability = "manual" + ServiceScalabilityNone ServiceScalability = "none" +) + +func PossibleValuesForServiceScalability() []string { + return []string{ + string(ServiceScalabilityAutomatic), + string(ServiceScalabilityManual), + string(ServiceScalabilityNone), + } +} + +func (s *ServiceScalability) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServiceScalability(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServiceScalability(input string) (*ServiceScalability, error) { + vals := map[string]ServiceScalability{ + "automatic": ServiceScalabilityAutomatic, + "manual": ServiceScalabilityManual, + "none": ServiceScalabilityNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServiceScalability(input) + return &out, nil +} + +type Severity string + +const ( + SeverityError Severity = "Error" + SeverityMessage Severity = "Message" + SeverityWarning Severity = "Warning" +) + +func PossibleValuesForSeverity() []string { + return []string{ + string(SeverityError), + string(SeverityMessage), + string(SeverityWarning), + } +} + +func (s *Severity) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSeverity(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSeverity(input string) (*Severity, error) { + vals := map[string]Severity{ + "error": SeverityError, + "message": SeverityMessage, + "warning": SeverityWarning, + } 
+ if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Severity(input) + return &out, nil +} + +type SqlSourcePlatform string + +const ( + SqlSourcePlatformSqlOnPrem SqlSourcePlatform = "SqlOnPrem" +) + +func PossibleValuesForSqlSourcePlatform() []string { + return []string{ + string(SqlSourcePlatformSqlOnPrem), + } +} + +func (s *SqlSourcePlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSqlSourcePlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSqlSourcePlatform(input string) (*SqlSourcePlatform, error) { + vals := map[string]SqlSourcePlatform{ + "sqlonprem": SqlSourcePlatformSqlOnPrem, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SqlSourcePlatform(input) + return &out, nil +} + +type SsisMigrationOverwriteOption string + +const ( + SsisMigrationOverwriteOptionIgnore SsisMigrationOverwriteOption = "Ignore" + SsisMigrationOverwriteOptionOverwrite SsisMigrationOverwriteOption = "Overwrite" +) + +func PossibleValuesForSsisMigrationOverwriteOption() []string { + return []string{ + string(SsisMigrationOverwriteOptionIgnore), + string(SsisMigrationOverwriteOptionOverwrite), + } +} + +func (s *SsisMigrationOverwriteOption) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisMigrationOverwriteOption(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisMigrationOverwriteOption(input string) (*SsisMigrationOverwriteOption, error) { + vals := 
map[string]SsisMigrationOverwriteOption{ + "ignore": SsisMigrationOverwriteOptionIgnore, + "overwrite": SsisMigrationOverwriteOptionOverwrite, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisMigrationOverwriteOption(input) + return &out, nil +} + +type SsisMigrationStage string + +const ( + SsisMigrationStageCompleted SsisMigrationStage = "Completed" + SsisMigrationStageInProgress SsisMigrationStage = "InProgress" + SsisMigrationStageInitialize SsisMigrationStage = "Initialize" + SsisMigrationStageNone SsisMigrationStage = "None" +) + +func PossibleValuesForSsisMigrationStage() []string { + return []string{ + string(SsisMigrationStageCompleted), + string(SsisMigrationStageInProgress), + string(SsisMigrationStageInitialize), + string(SsisMigrationStageNone), + } +} + +func (s *SsisMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisMigrationStage(input string) (*SsisMigrationStage, error) { + vals := map[string]SsisMigrationStage{ + "completed": SsisMigrationStageCompleted, + "inprogress": SsisMigrationStageInProgress, + "initialize": SsisMigrationStageInitialize, + "none": SsisMigrationStageNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisMigrationStage(input) + return &out, nil +} + +type SsisStoreType string + +const ( + SsisStoreTypeSsisCatalog SsisStoreType = "SsisCatalog" +) + +func PossibleValuesForSsisStoreType() []string { + return []string{ + string(SsisStoreTypeSsisCatalog), + } +} + +func (s *SsisStoreType) UnmarshalJSON(bytes []byte) error { + var 
decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisStoreType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisStoreType(input string) (*SsisStoreType, error) { + vals := map[string]SsisStoreType{ + "ssiscatalog": SsisStoreTypeSsisCatalog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisStoreType(input) + return &out, nil +} + +type SyncDatabaseMigrationReportingState string + +const ( + SyncDatabaseMigrationReportingStateBACKUPCOMPLETED SyncDatabaseMigrationReportingState = "BACKUP_COMPLETED" + SyncDatabaseMigrationReportingStateBACKUPINPROGRESS SyncDatabaseMigrationReportingState = "BACKUP_IN_PROGRESS" + SyncDatabaseMigrationReportingStateCANCELLED SyncDatabaseMigrationReportingState = "CANCELLED" + SyncDatabaseMigrationReportingStateCANCELLING SyncDatabaseMigrationReportingState = "CANCELLING" + SyncDatabaseMigrationReportingStateCOMPLETE SyncDatabaseMigrationReportingState = "COMPLETE" + SyncDatabaseMigrationReportingStateCOMPLETING SyncDatabaseMigrationReportingState = "COMPLETING" + SyncDatabaseMigrationReportingStateCONFIGURING SyncDatabaseMigrationReportingState = "CONFIGURING" + SyncDatabaseMigrationReportingStateFAILED SyncDatabaseMigrationReportingState = "FAILED" + SyncDatabaseMigrationReportingStateINITIALIAZING SyncDatabaseMigrationReportingState = "INITIALIAZING" + SyncDatabaseMigrationReportingStateREADYTOCOMPLETE SyncDatabaseMigrationReportingState = "READY_TO_COMPLETE" + SyncDatabaseMigrationReportingStateRESTORECOMPLETED SyncDatabaseMigrationReportingState = "RESTORE_COMPLETED" + SyncDatabaseMigrationReportingStateRESTOREINPROGRESS SyncDatabaseMigrationReportingState = "RESTORE_IN_PROGRESS" + SyncDatabaseMigrationReportingStateRUNNING SyncDatabaseMigrationReportingState 
= "RUNNING" + SyncDatabaseMigrationReportingStateSTARTING SyncDatabaseMigrationReportingState = "STARTING" + SyncDatabaseMigrationReportingStateUNDEFINED SyncDatabaseMigrationReportingState = "UNDEFINED" + SyncDatabaseMigrationReportingStateVALIDATING SyncDatabaseMigrationReportingState = "VALIDATING" + SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE SyncDatabaseMigrationReportingState = "VALIDATION_COMPLETE" + SyncDatabaseMigrationReportingStateVALIDATIONFAILED SyncDatabaseMigrationReportingState = "VALIDATION_FAILED" +) + +func PossibleValuesForSyncDatabaseMigrationReportingState() []string { + return []string{ + string(SyncDatabaseMigrationReportingStateBACKUPCOMPLETED), + string(SyncDatabaseMigrationReportingStateBACKUPINPROGRESS), + string(SyncDatabaseMigrationReportingStateCANCELLED), + string(SyncDatabaseMigrationReportingStateCANCELLING), + string(SyncDatabaseMigrationReportingStateCOMPLETE), + string(SyncDatabaseMigrationReportingStateCOMPLETING), + string(SyncDatabaseMigrationReportingStateCONFIGURING), + string(SyncDatabaseMigrationReportingStateFAILED), + string(SyncDatabaseMigrationReportingStateINITIALIAZING), + string(SyncDatabaseMigrationReportingStateREADYTOCOMPLETE), + string(SyncDatabaseMigrationReportingStateRESTORECOMPLETED), + string(SyncDatabaseMigrationReportingStateRESTOREINPROGRESS), + string(SyncDatabaseMigrationReportingStateRUNNING), + string(SyncDatabaseMigrationReportingStateSTARTING), + string(SyncDatabaseMigrationReportingStateUNDEFINED), + string(SyncDatabaseMigrationReportingStateVALIDATING), + string(SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE), + string(SyncDatabaseMigrationReportingStateVALIDATIONFAILED), + } +} + +func (s *SyncDatabaseMigrationReportingState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSyncDatabaseMigrationReportingState(decoded) + if err != nil { + return 
fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSyncDatabaseMigrationReportingState(input string) (*SyncDatabaseMigrationReportingState, error) { + vals := map[string]SyncDatabaseMigrationReportingState{ + "backup_completed": SyncDatabaseMigrationReportingStateBACKUPCOMPLETED, + "backup_in_progress": SyncDatabaseMigrationReportingStateBACKUPINPROGRESS, + "cancelled": SyncDatabaseMigrationReportingStateCANCELLED, + "cancelling": SyncDatabaseMigrationReportingStateCANCELLING, + "complete": SyncDatabaseMigrationReportingStateCOMPLETE, + "completing": SyncDatabaseMigrationReportingStateCOMPLETING, + "configuring": SyncDatabaseMigrationReportingStateCONFIGURING, + "failed": SyncDatabaseMigrationReportingStateFAILED, + "initialiazing": SyncDatabaseMigrationReportingStateINITIALIAZING, + "ready_to_complete": SyncDatabaseMigrationReportingStateREADYTOCOMPLETE, + "restore_completed": SyncDatabaseMigrationReportingStateRESTORECOMPLETED, + "restore_in_progress": SyncDatabaseMigrationReportingStateRESTOREINPROGRESS, + "running": SyncDatabaseMigrationReportingStateRUNNING, + "starting": SyncDatabaseMigrationReportingStateSTARTING, + "undefined": SyncDatabaseMigrationReportingStateUNDEFINED, + "validating": SyncDatabaseMigrationReportingStateVALIDATING, + "validation_complete": SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE, + "validation_failed": SyncDatabaseMigrationReportingStateVALIDATIONFAILED, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SyncDatabaseMigrationReportingState(input) + return &out, nil +} + +type SyncTableMigrationState string + +const ( + SyncTableMigrationStateBEFORELOAD SyncTableMigrationState = "BEFORE_LOAD" + SyncTableMigrationStateCANCELED SyncTableMigrationState = "CANCELED" + SyncTableMigrationStateCOMPLETED SyncTableMigrationState = "COMPLETED" + SyncTableMigrationStateERROR SyncTableMigrationState = 
"ERROR" + SyncTableMigrationStateFAILED SyncTableMigrationState = "FAILED" + SyncTableMigrationStateFULLLOAD SyncTableMigrationState = "FULL_LOAD" +) + +func PossibleValuesForSyncTableMigrationState() []string { + return []string{ + string(SyncTableMigrationStateBEFORELOAD), + string(SyncTableMigrationStateCANCELED), + string(SyncTableMigrationStateCOMPLETED), + string(SyncTableMigrationStateERROR), + string(SyncTableMigrationStateFAILED), + string(SyncTableMigrationStateFULLLOAD), + } +} + +func (s *SyncTableMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSyncTableMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSyncTableMigrationState(input string) (*SyncTableMigrationState, error) { + vals := map[string]SyncTableMigrationState{ + "before_load": SyncTableMigrationStateBEFORELOAD, + "canceled": SyncTableMigrationStateCANCELED, + "completed": SyncTableMigrationStateCOMPLETED, + "error": SyncTableMigrationStateERROR, + "failed": SyncTableMigrationStateFAILED, + "full_load": SyncTableMigrationStateFULLLOAD, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SyncTableMigrationState(input) + return &out, nil +} + +type TaskState string + +const ( + TaskStateCanceled TaskState = "Canceled" + TaskStateFailed TaskState = "Failed" + TaskStateFailedInputValidation TaskState = "FailedInputValidation" + TaskStateFaulted TaskState = "Faulted" + TaskStateQueued TaskState = "Queued" + TaskStateRunning TaskState = "Running" + TaskStateSucceeded TaskState = "Succeeded" + TaskStateUnknown TaskState = "Unknown" +) + +func PossibleValuesForTaskState() []string { + return []string{ + string(TaskStateCanceled), + string(TaskStateFailed), + 
string(TaskStateFailedInputValidation), + string(TaskStateFaulted), + string(TaskStateQueued), + string(TaskStateRunning), + string(TaskStateSucceeded), + string(TaskStateUnknown), + } +} + +func (s *TaskState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTaskState(input string) (*TaskState, error) { + vals := map[string]TaskState{ + "canceled": TaskStateCanceled, + "failed": TaskStateFailed, + "failedinputvalidation": TaskStateFailedInputValidation, + "faulted": TaskStateFaulted, + "queued": TaskStateQueued, + "running": TaskStateRunning, + "succeeded": TaskStateSucceeded, + "unknown": TaskStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskState(input) + return &out, nil +} + +type TaskType string + +const ( + TaskTypeConnectPointMongoDb TaskType = "Connect.MongoDb" + TaskTypeConnectToSourcePointMySql TaskType = "ConnectToSource.MySql" + TaskTypeConnectToSourcePointOraclePointSync TaskType = "ConnectToSource.Oracle.Sync" + TaskTypeConnectToSourcePointPostgreSqlPointSync TaskType = "ConnectToSource.PostgreSql.Sync" + TaskTypeConnectToSourcePointSqlServer TaskType = "ConnectToSource.SqlServer" + TaskTypeConnectToSourcePointSqlServerPointSync TaskType = "ConnectToSource.SqlServer.Sync" + TaskTypeConnectToTargetPointAzureDbForMySql TaskType = "ConnectToTarget.AzureDbForMySql" + TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointAzureSqlDbMI TaskType = "ConnectToTarget.AzureSqlDbMI" + TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS TaskType = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + 
TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointSqlDb TaskType = "ConnectToTarget.SqlDb" + TaskTypeConnectToTargetPointSqlDbPointSync TaskType = "ConnectToTarget.SqlDb.Sync" + TaskTypeGetTDECertificatesPointSql TaskType = "GetTDECertificates.Sql" + TaskTypeGetUserTablesMySql TaskType = "GetUserTablesMySql" + TaskTypeGetUserTablesOracle TaskType = "GetUserTablesOracle" + TaskTypeGetUserTablesPointAzureSqlDbPointSync TaskType = "GetUserTables.AzureSqlDb.Sync" + TaskTypeGetUserTablesPointSql TaskType = "GetUserTables.Sql" + TaskTypeGetUserTablesPostgreSql TaskType = "GetUserTablesPostgreSql" + TaskTypeMigratePointMongoDb TaskType = "Migrate.MongoDb" + TaskTypeMigratePointMySqlPointAzureDbForMySql TaskType = "Migrate.MySql.AzureDbForMySql" + TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync TaskType = "Migrate.MySql.AzureDbForMySql.Sync" + TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync TaskType = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo TaskType = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + TaskTypeMigratePointSqlServerPointAzureSqlDbMI TaskType = "Migrate.SqlServer.AzureSqlDbMI" + TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync TaskType = "Migrate.SqlServer.AzureSqlDb.Sync" + TaskTypeMigratePointSqlServerPointSqlDb TaskType = "Migrate.SqlServer.SqlDb" + TaskTypeMigratePointSsis TaskType = "Migrate.Ssis" + TaskTypeMigrateSchemaSqlServerSqlDb TaskType = "MigrateSchemaSqlServerSqlDb" + TaskTypeServicePointCheckPointOCI TaskType = "Service.Check.OCI" + TaskTypeServicePointInstallPointOCI TaskType = "Service.Install.OCI" + TaskTypeServicePointUploadPointOCI TaskType = "Service.Upload.OCI" + 
TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync TaskType = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + TaskTypeValidatePointMongoDb TaskType = "Validate.MongoDb" + TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync TaskType = "Validate.Oracle.AzureDbPostgreSql.Sync" +) + +func PossibleValuesForTaskType() []string { + return []string{ + string(TaskTypeConnectPointMongoDb), + string(TaskTypeConnectToSourcePointMySql), + string(TaskTypeConnectToSourcePointOraclePointSync), + string(TaskTypeConnectToSourcePointPostgreSqlPointSync), + string(TaskTypeConnectToSourcePointSqlServer), + string(TaskTypeConnectToSourcePointSqlServerPointSync), + string(TaskTypeConnectToTargetPointAzureDbForMySql), + string(TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointAzureSqlDbMI), + string(TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointSqlDb), + string(TaskTypeConnectToTargetPointSqlDbPointSync), + string(TaskTypeGetTDECertificatesPointSql), + string(TaskTypeGetUserTablesMySql), + string(TaskTypeGetUserTablesOracle), + string(TaskTypeGetUserTablesPointAzureSqlDbPointSync), + string(TaskTypeGetUserTablesPointSql), + string(TaskTypeGetUserTablesPostgreSql), + string(TaskTypeMigratePointMongoDb), + string(TaskTypeMigratePointMySqlPointAzureDbForMySql), + string(TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync), + string(TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo), + 
string(TaskTypeMigratePointSqlServerPointAzureSqlDbMI), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync), + string(TaskTypeMigratePointSqlServerPointSqlDb), + string(TaskTypeMigratePointSsis), + string(TaskTypeMigrateSchemaSqlServerSqlDb), + string(TaskTypeServicePointCheckPointOCI), + string(TaskTypeServicePointInstallPointOCI), + string(TaskTypeServicePointUploadPointOCI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync), + string(TaskTypeValidatePointMongoDb), + string(TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync), + } +} + +func (s *TaskType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTaskType(input string) (*TaskType, error) { + vals := map[string]TaskType{ + "connect.mongodb": TaskTypeConnectPointMongoDb, + "connecttosource.mysql": TaskTypeConnectToSourcePointMySql, + "connecttosource.oracle.sync": TaskTypeConnectToSourcePointOraclePointSync, + "connecttosource.postgresql.sync": TaskTypeConnectToSourcePointPostgreSqlPointSync, + "connecttosource.sqlserver": TaskTypeConnectToSourcePointSqlServer, + "connecttosource.sqlserver.sync": TaskTypeConnectToSourcePointSqlServerPointSync, + "connecttotarget.azuredbformysql": TaskTypeConnectToTargetPointAzureDbForMySql, + "connecttotarget.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync, + "connecttotarget.azuresqldbmi": TaskTypeConnectToTargetPointAzureSqlDbMI, + "connecttotarget.azuresqldbmi.sync.lrs": 
TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS, + "connecttotarget.oracle.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync, + "connecttotarget.sqldb": TaskTypeConnectToTargetPointSqlDb, + "connecttotarget.sqldb.sync": TaskTypeConnectToTargetPointSqlDbPointSync, + "gettdecertificates.sql": TaskTypeGetTDECertificatesPointSql, + "getusertablesmysql": TaskTypeGetUserTablesMySql, + "getusertablesoracle": TaskTypeGetUserTablesOracle, + "getusertables.azuresqldb.sync": TaskTypeGetUserTablesPointAzureSqlDbPointSync, + "getusertables.sql": TaskTypeGetUserTablesPointSql, + "getusertablespostgresql": TaskTypeGetUserTablesPostgreSql, + "migrate.mongodb": TaskTypeMigratePointMongoDb, + "migrate.mysql.azuredbformysql": TaskTypeMigratePointMySqlPointAzureDbForMySql, + "migrate.mysql.azuredbformysql.sync": TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync, + "migrate.oracle.azuredbforpostgresql.sync": TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync, + "migrate.postgresql.azuredbforpostgresql.syncv2": TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo, + "migrate.sqlserver.azuresqldbmi": TaskTypeMigratePointSqlServerPointAzureSqlDbMI, + "migrate.sqlserver.azuresqldbmi.sync.lrs": TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "migrate.sqlserver.azuresqldb.sync": TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync, + "migrate.sqlserver.sqldb": TaskTypeMigratePointSqlServerPointSqlDb, + "migrate.ssis": TaskTypeMigratePointSsis, + "migrateschemasqlserversqldb": TaskTypeMigrateSchemaSqlServerSqlDb, + "service.check.oci": TaskTypeServicePointCheckPointOCI, + "service.install.oci": TaskTypeServicePointInstallPointOCI, + "service.upload.oci": TaskTypeServicePointUploadPointOCI, + "validatemigrationinput.sqlserver.azuresqldbmi": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI, + "validatemigrationinput.sqlserver.azuresqldbmi.sync.lrs": 
TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "validatemigrationinput.sqlserver.sqldb.sync": TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync, + "validate.mongodb": TaskTypeValidatePointMongoDb, + "validate.oracle.azuredbpostgresql.sync": TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskType(input) + return &out, nil +} + +type UpdateActionType string + +const ( + UpdateActionTypeAddedOnTarget UpdateActionType = "AddedOnTarget" + UpdateActionTypeChangedOnTarget UpdateActionType = "ChangedOnTarget" + UpdateActionTypeDeletedOnTarget UpdateActionType = "DeletedOnTarget" +) + +func PossibleValuesForUpdateActionType() []string { + return []string{ + string(UpdateActionTypeAddedOnTarget), + string(UpdateActionTypeChangedOnTarget), + string(UpdateActionTypeDeletedOnTarget), + } +} + +func (s *UpdateActionType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseUpdateActionType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseUpdateActionType(input string) (*UpdateActionType, error) { + vals := map[string]UpdateActionType{ + "addedontarget": UpdateActionTypeAddedOnTarget, + "changedontarget": UpdateActionTypeChangedOnTarget, + "deletedontarget": UpdateActionTypeDeletedOnTarget, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UpdateActionType(input) + return &out, nil +} + +type ValidationStatus string + +const ( + ValidationStatusCompleted ValidationStatus = "Completed" + ValidationStatusCompletedWithIssues ValidationStatus = "CompletedWithIssues" + 
ValidationStatusDefault ValidationStatus = "Default" + ValidationStatusFailed ValidationStatus = "Failed" + ValidationStatusInProgress ValidationStatus = "InProgress" + ValidationStatusInitialized ValidationStatus = "Initialized" + ValidationStatusNotStarted ValidationStatus = "NotStarted" + ValidationStatusStopped ValidationStatus = "Stopped" +) + +func PossibleValuesForValidationStatus() []string { + return []string{ + string(ValidationStatusCompleted), + string(ValidationStatusCompletedWithIssues), + string(ValidationStatusDefault), + string(ValidationStatusFailed), + string(ValidationStatusInProgress), + string(ValidationStatusInitialized), + string(ValidationStatusNotStarted), + string(ValidationStatusStopped), + } +} + +func (s *ValidationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseValidationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseValidationStatus(input string) (*ValidationStatus, error) { + vals := map[string]ValidationStatus{ + "completed": ValidationStatusCompleted, + "completedwithissues": ValidationStatusCompletedWithIssues, + "default": ValidationStatusDefault, + "failed": ValidationStatusFailed, + "inprogress": ValidationStatusInProgress, + "initialized": ValidationStatusInitialized, + "notstarted": ValidationStatusNotStarted, + "stopped": ValidationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ValidationStatus(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/id_file.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_file.go new file mode 100644 index 00000000000..5a91c520746 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/standardoperation/id_file.go @@ -0,0 +1,148 @@ +package standardoperation + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&FileId{}) +} + +var _ resourceids.ResourceId = &FileId{} + +// FileId is a struct representing the Resource ID for a File +type FileId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + FileName string +} + +// NewFileID returns a new FileId struct +func NewFileID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, fileName string) FileId { + return FileId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + FileName: fileName, + } +} + +// ParseFileID parses 'input' into a FileId +func ParseFileID(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseFileIDInsensitively parses 'input' case-insensitively into a FileId +// note: this method should only be used for API response data and not user input +func ParseFileIDInsensitively(input string) (*FileId, error) { + parser := resourceids.NewParserFromResourceIdType(&FileId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := FileId{} + if err = id.FromParseResult(*parsed); err != nil { + return 
nil, err + } + + return &id, nil +} + +func (id *FileId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.FileName, ok = input.Parsed["fileName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "fileName", input) + } + + return nil +} + +// ValidateFileID checks that 'input' can be parsed as a File ID +func ValidateFileID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseFileID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted File ID +func (id FileId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/files/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.FileName) +} + +// Segments returns a slice of Resource ID Segments which comprise this File ID +func (id FileId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + 
resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticFiles", "files", "files"), + resourceids.UserSpecifiedSegment("fileName", "fileName"), + } +} + +// String returns a human-readable description of this File ID +func (id FileId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("File Name: %q", id.FileName), + } + return fmt.Sprintf("File (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/id_file_test.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_file_test.go new file mode 100644 index 00000000000..edf96d895e8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/id_file_test.go @@ -0,0 +1,372 @@ +package standardoperation + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &FileId{} + +func TestNewFileID(t *testing.T) { + id := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.FileName != "fileName" { + t.Fatalf("Expected %q but got %q for Segment 'FileName'", id.FileName, "fileName") + } +} + +func TestFormatFileID(t *testing.T) { + actual := NewFileID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "fileName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseFileID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestParseFileIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *FileId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + FileName: "fileName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/files/fileName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE", + Expected: &FileId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + FileName: "fIlEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/fIlEs/fIlEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseFileIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.FileName != v.Expected.FileName { + t.Fatalf("Expected %q but got %q for FileName", v.Expected.FileName, actual.FileName) + } + + } +} + +func TestSegmentsForFileId(t *testing.T) { + segments := FileId{}.Segments() + if len(segments) == 0 { + t.Fatalf("FileId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/id_location.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_location.go new file mode 100644 index 00000000000..2ab4d2bae9d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/id_location.go @@ -0,0 +1,121 @@ +package standardoperation + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&LocationId{}) +} + +var _ resourceids.ResourceId = &LocationId{} + +// LocationId is a struct representing the Resource ID for a Location +type LocationId struct { + SubscriptionId string + LocationName string +} + +// NewLocationID returns a new LocationId struct +func NewLocationID(subscriptionId string, locationName string) LocationId { + return LocationId{ + SubscriptionId: subscriptionId, + LocationName: locationName, + } +} + +// ParseLocationID parses 'input' into a LocationId +func ParseLocationID(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseLocationIDInsensitively parses 'input' case-insensitively into a LocationId +// note: this method should only be used for API response data and not user input +func ParseLocationIDInsensitively(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *LocationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.LocationName, ok = input.Parsed["locationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "locationName", input) + } + + return nil +} + +// ValidateLocationID checks that 'input' can be parsed as a Location ID +func ValidateLocationID(input interface{}, key string) 
(warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseLocationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Location ID +func (id LocationId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.DataMigration/locations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.LocationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Location ID +func (id LocationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticLocations", "locations", "locations"), + resourceids.UserSpecifiedSegment("locationName", "locationName"), + } +} + +// String returns a human-readable description of this Location ID +func (id LocationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location Name: %q", id.LocationName), + } + return fmt.Sprintf("Location (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/id_location_test.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_location_test.go new file mode 100644 index 00000000000..7772869feab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/id_location_test.go @@ -0,0 +1,237 @@ +package standardoperation + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &LocationId{} + +func TestNewLocationID(t *testing.T) { + id := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.LocationName != "locationName" { + t.Fatalf("Expected %q but got %q for Segment 'LocationName'", id.LocationName, "locationName") + } +} + +func TestFormatLocationID(t *testing.T) { + actual := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseLocationID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName", + Expected: &LocationId{ + SubscriptionId: 
"12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestParseLocationIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe 
since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/lOcAtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.DataMigration/locations/locationName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/lOcAtIoNs/lOcAtIoNnAmE", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "lOcAtIoNnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/lOcAtIoNs/lOcAtIoNnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != 
v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestSegmentsForLocationId(t *testing.T) { + segments := LocationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("LocationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/id_project.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_project.go new file mode 100644 index 00000000000..1dd919dc933 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/id_project.go @@ -0,0 +1,139 @@ +package standardoperation + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ProjectId{}) +} + +var _ resourceids.ResourceId = &ProjectId{} + +// ProjectId is a struct representing the Resource ID for a Project +type ProjectId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string +} + +// NewProjectID returns a new ProjectId struct +func NewProjectID(subscriptionId string, resourceGroupName string, serviceName string, projectName string) ProjectId { + return ProjectId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + } +} + +// ParseProjectID parses 'input' into a ProjectId +func ParseProjectID(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseProjectIDInsensitively parses 'input' case-insensitively into a ProjectId +// note: this method should only be used for API response data and not user input +func ParseProjectIDInsensitively(input string) (*ProjectId, error) { + parser := resourceids.NewParserFromResourceIdType(&ProjectId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ProjectId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ProjectId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + 
if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + return nil +} + +// ValidateProjectID checks that 'input' can be parsed as a Project ID +func ValidateProjectID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseProjectID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Project ID +func (id ProjectId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Project ID +func (id ProjectId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + } +} + +// String 
returns a human-readable description of this Project ID +func (id ProjectId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + } + return fmt.Sprintf("Project (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/id_project_test.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_project_test.go new file mode 100644 index 00000000000..076fcc9395d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/id_project_test.go @@ -0,0 +1,327 @@ +package standardoperation + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &ProjectId{} + +func TestNewProjectID(t *testing.T) { + id := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } +} + +func TestFormatProjectID(t *testing.T) { + actual := NewProjectID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseProjectID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if 
actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestParseProjectIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ProjectId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Expected: 
&ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Expected: &ProjectId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseProjectIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != 
v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + } +} + +func TestSegmentsForProjectId(t *testing.T) { + segments := ProjectId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ProjectId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/id_service.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_service.go new file mode 100644 index 00000000000..dadc95540e3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/id_service.go @@ -0,0 +1,130 @@ +package standardoperation + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ServiceId{}) +} + +var _ resourceids.ResourceId = &ServiceId{} + +// ServiceId is a struct representing the Resource ID for a Service +type ServiceId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string +} + +// NewServiceID returns a new ServiceId struct +func NewServiceID(subscriptionId string, resourceGroupName string, serviceName string) ServiceId { + return ServiceId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + } +} + +// ParseServiceID parses 'input' into a ServiceId +func ParseServiceID(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceIDInsensitively parses 'input' case-insensitively into a ServiceId +// note: this method should only be used for API response data and not user input +func ParseServiceIDInsensitively(input string) (*ServiceId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + return nil +} + +// ValidateServiceID checks that 'input' can be parsed as a Service ID +func ValidateServiceID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service ID +func (id ServiceId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service ID +func (id ServiceId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + } +} + +// String returns a human-readable description of this Service ID +func (id ServiceId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + } + return fmt.Sprintf("Service (%s)", strings.Join(components, "\n")) +} diff --git 
a/resource-manager/datamigration/2025-06-30/standardoperation/id_service_test.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_service_test.go new file mode 100644 index 00000000000..77ad8489e7f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/id_service_test.go @@ -0,0 +1,282 @@ +package standardoperation + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &ServiceId{} + +func TestNewServiceID(t *testing.T) { + id := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } +} + +func TestFormatServiceID(t *testing.T) { + actual := NewServiceID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseServiceID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for 
ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestParseServiceIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { 
+ // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Expected: &ServiceId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + 
t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + } +} + +func TestSegmentsForServiceId(t *testing.T) { + segments := ServiceId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/id_servicetask.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_servicetask.go new file mode 100644 index 00000000000..fe6e03174b2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/id_servicetask.go @@ -0,0 +1,139 @@ +package standardoperation + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&ServiceTaskId{}) +} + +var _ resourceids.ResourceId = &ServiceTaskId{} + +// ServiceTaskId is a struct representing the Resource ID for a Service Task +type ServiceTaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ServiceTaskName string +} + +// NewServiceTaskID returns a new ServiceTaskId struct +func NewServiceTaskID(subscriptionId string, resourceGroupName string, serviceName string, serviceTaskName string) ServiceTaskId { + return ServiceTaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ServiceTaskName: serviceTaskName, + } +} + +// ParseServiceTaskID parses 'input' into a ServiceTaskId +func ParseServiceTaskID(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseServiceTaskIDInsensitively parses 'input' case-insensitively into a ServiceTaskId +// note: this method should only be used for API response data and not user input +func ParseServiceTaskIDInsensitively(input string) (*ServiceTaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&ServiceTaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := ServiceTaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *ServiceTaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = 
input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ServiceTaskName, ok = input.Parsed["serviceTaskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceTaskName", input) + } + + return nil +} + +// ValidateServiceTaskID checks that 'input' can be parsed as a Service Task ID +func ValidateServiceTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseServiceTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Service Task ID +func (id ServiceTaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/serviceTasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ServiceTaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Service Task ID +func (id ServiceTaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + 
resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + resourceids.StaticSegment("staticServiceTasks", "serviceTasks", "serviceTasks"), + resourceids.UserSpecifiedSegment("serviceTaskName", "serviceTaskName"), + } +} + +// String returns a human-readable description of this Service Task ID +func (id ServiceTaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Service Task Name: %q", id.ServiceTaskName), + } + return fmt.Sprintf("Service Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/id_servicetask_test.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_servicetask_test.go new file mode 100644 index 00000000000..082bc4c0c98 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/id_servicetask_test.go @@ -0,0 +1,327 @@ +package standardoperation + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &ServiceTaskId{} + +// TestNewServiceTaskID checks that NewServiceTaskID assigns each constructor argument to the matching field. +func TestNewServiceTaskID(t *testing.T) { + id := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", "12345678-1234-9876-4563-123456789012", id.SubscriptionId) + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", "resourceGroupName", id.ResourceGroupName) + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", "serviceName", id.ServiceName) + } + + if id.ServiceTaskName != "serviceTaskName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceTaskName'", "serviceTaskName", id.ServiceTaskName) + } +} + +func TestFormatServiceTaskID(t *testing.T) { + actual := NewServiceTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "serviceTaskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseServiceTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // 
Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", 
v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestParseServiceTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *ServiceTaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI 
(mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ServiceTaskName: "serviceTaskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/serviceTasks/serviceTaskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe", + Expected: &ServiceTaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ServiceTaskName: "sErViCeTaSkNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/sErViCeTaSkS/sErViCeTaSkNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseServiceTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", 
v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ServiceTaskName != v.Expected.ServiceTaskName { + t.Fatalf("Expected %q but got %q for ServiceTaskName", v.Expected.ServiceTaskName, actual.ServiceTaskName) + } + + } +} + +func TestSegmentsForServiceTaskId(t *testing.T) { + segments := ServiceTaskId{}.Segments() + if len(segments) == 0 { + t.Fatalf("ServiceTaskId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/id_subscriptionresourcegroup.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_subscriptionresourcegroup.go new file mode 100644 index 00000000000..883c81cc560 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/id_subscriptionresourcegroup.go @@ -0,0 +1,119 @@ +package standardoperation + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&SubscriptionResourceGroupId{}) +} + +var _ resourceids.ResourceId = &SubscriptionResourceGroupId{} + +// SubscriptionResourceGroupId is a struct representing the Resource ID for a Subscription Resource Group +type SubscriptionResourceGroupId struct { + SubscriptionId string + ResourceGroupName string +} + +// NewSubscriptionResourceGroupID returns a new SubscriptionResourceGroupId struct +func NewSubscriptionResourceGroupID(subscriptionId string, resourceGroupName string) SubscriptionResourceGroupId { + return SubscriptionResourceGroupId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + } +} + +// ParseSubscriptionResourceGroupID parses 'input' into a SubscriptionResourceGroupId +func ParseSubscriptionResourceGroupID(input string) (*SubscriptionResourceGroupId, error) { + parser := resourceids.NewParserFromResourceIdType(&SubscriptionResourceGroupId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SubscriptionResourceGroupId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseSubscriptionResourceGroupIDInsensitively parses 'input' case-insensitively into a SubscriptionResourceGroupId +// note: this method should only be used for API response data and not user input +func ParseSubscriptionResourceGroupIDInsensitively(input string) (*SubscriptionResourceGroupId, error) { + parser := resourceids.NewParserFromResourceIdType(&SubscriptionResourceGroupId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SubscriptionResourceGroupId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *SubscriptionResourceGroupId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = 
input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + return nil +} + +// ValidateSubscriptionResourceGroupID checks that 'input' can be parsed as a Subscription Resource Group ID +func ValidateSubscriptionResourceGroupID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseSubscriptionResourceGroupID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Subscription Resource Group ID +func (id SubscriptionResourceGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Subscription Resource Group ID +func (id SubscriptionResourceGroupId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + } +} + +// String returns a human-readable description of this Subscription Resource Group ID +func (id SubscriptionResourceGroupId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + } + return fmt.Sprintf("Subscription Resource Group (%s)", strings.Join(components, "\n")) +} diff --git 
a/resource-manager/datamigration/2025-06-30/standardoperation/id_subscriptionresourcegroup_test.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_subscriptionresourcegroup_test.go new file mode 100644 index 00000000000..645d74f419d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/id_subscriptionresourcegroup_test.go @@ -0,0 +1,208 @@ +package standardoperation + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &SubscriptionResourceGroupId{} + +// TestNewSubscriptionResourceGroupID checks that NewSubscriptionResourceGroupID assigns each constructor argument to the matching field. +func TestNewSubscriptionResourceGroupID(t *testing.T) { + id := NewSubscriptionResourceGroupID("12345678-1234-9876-4563-123456789012", "resourceGroupName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", "12345678-1234-9876-4563-123456789012", id.SubscriptionId) + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", "resourceGroupName", id.ResourceGroupName) + } +} + +func TestFormatSubscriptionResourceGroupID(t *testing.T) { + actual := NewSubscriptionResourceGroupID("12345678-1234-9876-4563-123456789012", "resourceGroupName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseSubscriptionResourceGroupID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SubscriptionResourceGroupId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Expected: &SubscriptionResourceGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSubscriptionResourceGroupID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + } +} + +func TestParseSubscriptionResourceGroupIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SubscriptionResourceGroupId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Expected: &SubscriptionResourceGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Expected: &SubscriptionResourceGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSubscriptionResourceGroupIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + } +} + +func TestSegmentsForSubscriptionResourceGroupId(t *testing.T) { + segments := 
SubscriptionResourceGroupId{}.Segments() + if len(segments) == 0 { + t.Fatalf("SubscriptionResourceGroupId has no segments") + } + + uniqueNames := make(map[string]struct{}) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/id_task.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_task.go new file mode 100644 index 00000000000..7d8b593e186 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/id_task.go @@ -0,0 +1,148 @@ +package standardoperation + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +func init() { + recaser.RegisterResourceId(&TaskId{}) +} + +var _ resourceids.ResourceId = &TaskId{} + +// TaskId is a struct representing the Resource ID for a Task +type TaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + TaskName string +} + +// NewTaskID returns a new TaskId struct +func NewTaskID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, taskName string) TaskId { + return TaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + TaskName: taskName, + } +} + +// ParseTaskID parses 'input' into a TaskId +func ParseTaskID(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseTaskIDInsensitively parses 'input' case-insensitively into a TaskId +// note: this method should only be used for API response data and not user input +func ParseTaskIDInsensitively(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *TaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if 
id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.TaskName, ok = input.Parsed["taskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "taskName", input) + } + + return nil +} + +// ValidateTaskID checks that 'input' can be parsed as a Task ID +func ValidateTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Task ID +func (id TaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/tasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.TaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Task ID +func (id TaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + 
resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticTasks", "tasks", "tasks"), + resourceids.UserSpecifiedSegment("taskName", "taskName"), + } +} + +// String returns a human-readable description of this Task ID +func (id TaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("Task Name: %q", id.TaskName), + } + return fmt.Sprintf("Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/id_task_test.go b/resource-manager/datamigration/2025-06-30/standardoperation/id_task_test.go new file mode 100644 index 00000000000..81ff8432eb0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/id_task_test.go @@ -0,0 +1,372 @@ +package standardoperation + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &TaskId{} + +func TestNewTaskID(t *testing.T) { + id := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.TaskName != "taskName" { + t.Fatalf("Expected %q but got %q for Segment 'TaskName'", id.TaskName, "taskName") + } +} + +func TestFormatTaskID(t *testing.T) { + actual := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.TaskName != v.Expected.TaskName { + t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName) + } + + } +} + +func TestParseTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + TaskName: "tAsKnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.TaskName != v.Expected.TaskName {
+			t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName)
+		}
+
+	}
+}
+
+func TestSegmentsForTaskId(t *testing.T) {
+	segments := TaskId{}.Segments()
+	if len(segments) == 0 {
+		t.Fatalf("TaskId has no segments")
+	}
+
+	uniqueNames := make(map[string]struct{}, 0)
+	for _, segment := range segments {
+		uniqueNames[segment.Name] = struct{}{}
+	}
+	if len(uniqueNames) != len(segments) {
+		t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments))
+	}
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_filescreateorupdate.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_filescreateorupdate.go
new file mode 100644
index 00000000000..4dc618827b7
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_filescreateorupdate.go
@@ -0,0 +1,58 @@
+package standardoperation
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/hashicorp/go-azure-sdk/sdk/client"
+	"github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type FilesCreateOrUpdateOperationResponse struct {
+	HttpResponse *http.Response
+	OData        *odata.OData
+	Model        *ProjectFile
+}
+
+// FilesCreateOrUpdate ...
+func (c StandardOperationClient) FilesCreateOrUpdate(ctx context.Context, id FileId, input ProjectFile) (result FilesCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectFile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_filesdelete.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_filesdelete.go new file mode 100644 index 00000000000..49abfd7f2c9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_filesdelete.go @@ -0,0 +1,47 @@ +package standardoperation + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesDeleteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData +} + +// FilesDelete ... 
+func (c StandardOperationClient) FilesDelete(ctx context.Context, id FileId) (result FilesDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_filesget.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_filesget.go new file mode 100644 index 00000000000..2ff0250ea1e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_filesget.go @@ -0,0 +1,53 @@ +package standardoperation + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectFile +} + +// FilesGet ... 
+func (c StandardOperationClient) FilesGet(ctx context.Context, id FileId) (result FilesGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectFile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_fileslist.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_fileslist.go new file mode 100644 index 00000000000..b460fdc7a7c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_fileslist.go @@ -0,0 +1,105 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]ProjectFile +} + +type FilesListCompleteResult struct { + LatestHttpResponse *http.Response + Items []ProjectFile +} + +type FilesListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *FilesListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// FilesList ... 
+func (c StandardOperationClient) FilesList(ctx context.Context, id ProjectId) (result FilesListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &FilesListCustomPager{}, + Path: fmt.Sprintf("%s/files", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]ProjectFile `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// FilesListComplete retrieves all the results into a single object +func (c StandardOperationClient) FilesListComplete(ctx context.Context, id ProjectId) (FilesListCompleteResult, error) { + return c.FilesListCompleteMatchingPredicate(ctx, id, ProjectFileOperationPredicate{}) +} + +// FilesListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c StandardOperationClient) FilesListCompleteMatchingPredicate(ctx context.Context, id ProjectId, predicate ProjectFileOperationPredicate) (result FilesListCompleteResult, err error) { + items := make([]ProjectFile, 0) + + resp, err := c.FilesList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = FilesListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_filesread.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_filesread.go 
new file mode 100644 index 00000000000..07fe4979221 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_filesread.go @@ -0,0 +1,54 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesReadOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *FileStorageInfo +} + +// FilesRead ... +func (c StandardOperationClient) FilesRead(ctx context.Context, id FileId) (result FilesReadOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/read", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model FileStorageInfo + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_filesreadwrite.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_filesreadwrite.go new file mode 100644 index 00000000000..afe50ed2688 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_filesreadwrite.go @@ -0,0 +1,54 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesReadWriteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *FileStorageInfo +} + +// FilesReadWrite ... +func (c StandardOperationClient) FilesReadWrite(ctx context.Context, id FileId) (result FilesReadWriteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/readwrite", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model FileStorageInfo + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_filesupdate.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_filesupdate.go new file mode 100644 index 00000000000..c717a324a43 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_filesupdate.go @@ -0,0 +1,57 @@ +package standardoperation + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FilesUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectFile +} + +// FilesUpdate ... 
+func (c StandardOperationClient) FilesUpdate(ctx context.Context, id FileId, input ProjectFile) (result FilesUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectFile + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_projectscreateorupdate.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_projectscreateorupdate.go new file mode 100644 index 00000000000..03b5f09e6e7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_projectscreateorupdate.go @@ -0,0 +1,58 @@ +package standardoperation + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectsCreateOrUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *Project +} + +// ProjectsCreateOrUpdate ... 
+func (c StandardOperationClient) ProjectsCreateOrUpdate(ctx context.Context, id ProjectId, input Project) (result ProjectsCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model Project + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_projectsdelete.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_projectsdelete.go new file mode 100644 index 00000000000..f1c7ee96cfc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_projectsdelete.go @@ -0,0 +1,77 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ProjectsDeleteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData +} + +type ProjectsDeleteOperationOptions struct { + DeleteRunningTasks *bool +} + +func DefaultProjectsDeleteOperationOptions() ProjectsDeleteOperationOptions { + return ProjectsDeleteOperationOptions{} +} + +func (o ProjectsDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ProjectsDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ProjectsDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.DeleteRunningTasks != nil { + out.Append("deleteRunningTasks", fmt.Sprintf("%v", *o.DeleteRunningTasks)) + } + return &out +} + +// ProjectsDelete ... +func (c StandardOperationClient) ProjectsDelete(ctx context.Context, id ProjectId, options ProjectsDeleteOperationOptions) (result ProjectsDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_projectsget.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_projectsget.go new file mode 100644 index 00000000000..360c8039781 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_projectsget.go @@ -0,0 +1,53 @@ +package standardoperation + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectsGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *Project +} + +// ProjectsGet ... +func (c StandardOperationClient) ProjectsGet(ctx context.Context, id ProjectId) (result ProjectsGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model Project + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_projectslist.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_projectslist.go new file mode 100644 index 00000000000..b9b8c13baba --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_projectslist.go @@ -0,0 +1,105 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ProjectsListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]Project +} + +type ProjectsListCompleteResult struct { + LatestHttpResponse *http.Response + Items []Project +} + +type ProjectsListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ProjectsListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ProjectsList ... +func (c StandardOperationClient) ProjectsList(ctx context.Context, id ServiceId) (result ProjectsListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ProjectsListCustomPager{}, + Path: fmt.Sprintf("%s/projects", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]Project `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ProjectsListComplete retrieves all the results into a single object +func (c StandardOperationClient) ProjectsListComplete(ctx context.Context, id ServiceId) (ProjectsListCompleteResult, error) { + return c.ProjectsListCompleteMatchingPredicate(ctx, id, ProjectOperationPredicate{}) +} + +// ProjectsListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c StandardOperationClient) ProjectsListCompleteMatchingPredicate(ctx context.Context, id ServiceId, predicate ProjectOperationPredicate) (result ProjectsListCompleteResult, err error) { + items := make([]Project, 0) + + resp, err := c.ProjectsList(ctx, id) + if err != nil { + result.LatestHttpResponse = 
resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ProjectsListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_projectsupdate.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_projectsupdate.go new file mode 100644 index 00000000000..aa949a7e7b9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_projectsupdate.go @@ -0,0 +1,57 @@ +package standardoperation + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectsUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *Project +} + +// ProjectsUpdate ... 
+func (c StandardOperationClient) ProjectsUpdate(ctx context.Context, id ProjectId, input Project) (result ProjectsUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model Project + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_resourceskuslistskus.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_resourceskuslistskus.go new file mode 100644 index 00000000000..4ceab2e61c1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_resourceskuslistskus.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ResourceSkusListSkusOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]ResourceSku +} + +type ResourceSkusListSkusCompleteResult struct { + LatestHttpResponse *http.Response + Items []ResourceSku +} + +type ResourceSkusListSkusCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ResourceSkusListSkusCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ResourceSkusListSkus ... +func (c StandardOperationClient) ResourceSkusListSkus(ctx context.Context, id commonids.SubscriptionId) (result ResourceSkusListSkusOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ResourceSkusListSkusCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/skus", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]ResourceSku `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ResourceSkusListSkusComplete retrieves all the results into a single object +func (c StandardOperationClient) ResourceSkusListSkusComplete(ctx context.Context, id commonids.SubscriptionId) (ResourceSkusListSkusCompleteResult, error) { + return c.ResourceSkusListSkusCompleteMatchingPredicate(ctx, id, ResourceSkuOperationPredicate{}) +} + +// ResourceSkusListSkusCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c StandardOperationClient) ResourceSkusListSkusCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate 
ResourceSkuOperationPredicate) (result ResourceSkusListSkusCompleteResult, err error) { + items := make([]ResourceSku, 0) + + resp, err := c.ResourceSkusListSkus(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ResourceSkusListSkusCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_serviceschecknameavailability.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_serviceschecknameavailability.go new file mode 100644 index 00000000000..06138e3ed52 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_serviceschecknameavailability.go @@ -0,0 +1,58 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesCheckNameAvailabilityOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *NameAvailabilityResponse +} + +// ServicesCheckNameAvailability ... 
+func (c StandardOperationClient) ServicesCheckNameAvailability(ctx context.Context, id LocationId, input NameAvailabilityRequest) (result ServicesCheckNameAvailabilityOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/checkNameAvailability", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model NameAvailabilityResponse + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_servicescreateorupdate.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicescreateorupdate.go new file mode 100644 index 00000000000..617f7ccda00 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicescreateorupdate.go @@ -0,0 +1,76 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesCreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DataMigrationService +} + +// ServicesCreateOrUpdate ... 
+func (c StandardOperationClient) ServicesCreateOrUpdate(ctx context.Context, id ServiceId, input DataMigrationService) (result ServicesCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesCreateOrUpdateThenPoll performs ServicesCreateOrUpdate then polls until it's completed +func (c StandardOperationClient) ServicesCreateOrUpdateThenPoll(ctx context.Context, id ServiceId, input DataMigrationService) error { + result, err := c.ServicesCreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing ServicesCreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesCreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_servicesdelete.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicesdelete.go new file mode 100644 index 00000000000..cde486f14d9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicesdelete.go @@ -0,0 +1,100 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesDeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +type ServicesDeleteOperationOptions struct { + DeleteRunningTasks *bool +} + +func DefaultServicesDeleteOperationOptions() ServicesDeleteOperationOptions { + return ServicesDeleteOperationOptions{} +} + +func (o ServicesDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ServicesDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ServicesDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.DeleteRunningTasks != nil { + out.Append("deleteRunningTasks", fmt.Sprintf("%v", *o.DeleteRunningTasks)) + } + return &out +} + +// ServicesDelete ... 
+func (c StandardOperationClient) ServicesDelete(ctx context.Context, id ServiceId, options ServicesDeleteOperationOptions) (result ServicesDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesDeleteThenPoll performs ServicesDelete then polls until it's completed +func (c StandardOperationClient) ServicesDeleteThenPoll(ctx context.Context, id ServiceId, options ServicesDeleteOperationOptions) error { + result, err := c.ServicesDelete(ctx, id, options) + if err != nil { + return fmt.Errorf("performing ServicesDelete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesDelete: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_servicesget.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicesget.go new file mode 100644 index 00000000000..7c357a21ed6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicesget.go @@ -0,0 +1,53 @@ +package standardoperation + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServicesGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *DataMigrationService +} + +// ServicesGet ... +func (c StandardOperationClient) ServicesGet(ctx context.Context, id ServiceId) (result ServicesGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model DataMigrationService + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_serviceslist.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_serviceslist.go new file mode 100644 index 00000000000..6edea8780ee --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_serviceslist.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServicesListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]DataMigrationService +} + +type ServicesListCompleteResult struct { + LatestHttpResponse *http.Response + Items []DataMigrationService +} + +type ServicesListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ServicesListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ServicesList ... +func (c StandardOperationClient) ServicesList(ctx context.Context, id commonids.SubscriptionId) (result ServicesListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ServicesListCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/services", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]DataMigrationService `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ServicesListComplete retrieves all the results into a single object +func (c StandardOperationClient) ServicesListComplete(ctx context.Context, id commonids.SubscriptionId) (ServicesListCompleteResult, error) { + return c.ServicesListCompleteMatchingPredicate(ctx, id, DataMigrationServiceOperationPredicate{}) +} + +// ServicesListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c StandardOperationClient) ServicesListCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate DataMigrationServiceOperationPredicate) (result ServicesListCompleteResult, 
err error) { + items := make([]DataMigrationService, 0) + + resp, err := c.ServicesList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ServicesListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_serviceslistbyresourcegroup.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_serviceslistbyresourcegroup.go new file mode 100644 index 00000000000..3c1d1801f93 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_serviceslistbyresourcegroup.go @@ -0,0 +1,105 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesListByResourceGroupOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]DataMigrationService +} + +type ServicesListByResourceGroupCompleteResult struct { + LatestHttpResponse *http.Response + Items []DataMigrationService +} + +type ServicesListByResourceGroupCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ServicesListByResourceGroupCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ServicesListByResourceGroup ... 
+func (c StandardOperationClient) ServicesListByResourceGroup(ctx context.Context, id SubscriptionResourceGroupId) (result ServicesListByResourceGroupOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ServicesListByResourceGroupCustomPager{}, + Path: fmt.Sprintf("%s/providers/Microsoft.DataMigration/services", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]DataMigrationService `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ServicesListByResourceGroupComplete retrieves all the results into a single object +func (c StandardOperationClient) ServicesListByResourceGroupComplete(ctx context.Context, id SubscriptionResourceGroupId) (ServicesListByResourceGroupCompleteResult, error) { + return c.ServicesListByResourceGroupCompleteMatchingPredicate(ctx, id, DataMigrationServiceOperationPredicate{}) +} + +// ServicesListByResourceGroupCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c StandardOperationClient) ServicesListByResourceGroupCompleteMatchingPredicate(ctx context.Context, id SubscriptionResourceGroupId, predicate DataMigrationServiceOperationPredicate) (result ServicesListByResourceGroupCompleteResult, err error) { + items := make([]DataMigrationService, 0) + + resp, err := c.ServicesListByResourceGroup(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + 
items = append(items, v) + } + } + } + + result = ServicesListByResourceGroupCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_serviceslistskus.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_serviceslistskus.go new file mode 100644 index 00000000000..fc460b54d16 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_serviceslistskus.go @@ -0,0 +1,105 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesListSkusOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]AvailableServiceSku +} + +type ServicesListSkusCompleteResult struct { + LatestHttpResponse *http.Response + Items []AvailableServiceSku +} + +type ServicesListSkusCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ServicesListSkusCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ServicesListSkus ... 
+func (c StandardOperationClient) ServicesListSkus(ctx context.Context, id ServiceId) (result ServicesListSkusOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ServicesListSkusCustomPager{}, + Path: fmt.Sprintf("%s/skus", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]AvailableServiceSku `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ServicesListSkusComplete retrieves all the results into a single object +func (c StandardOperationClient) ServicesListSkusComplete(ctx context.Context, id ServiceId) (ServicesListSkusCompleteResult, error) { + return c.ServicesListSkusCompleteMatchingPredicate(ctx, id, AvailableServiceSkuOperationPredicate{}) +} + +// ServicesListSkusCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c StandardOperationClient) ServicesListSkusCompleteMatchingPredicate(ctx context.Context, id ServiceId, predicate AvailableServiceSkuOperationPredicate) (result ServicesListSkusCompleteResult, err error) { + items := make([]AvailableServiceSku, 0) + + resp, err := c.ServicesListSkus(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ServicesListSkusCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git 
a/resource-manager/datamigration/2025-06-30/standardoperation/method_servicesupdate.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicesupdate.go new file mode 100644 index 00000000000..7849081ef57 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicesupdate.go @@ -0,0 +1,75 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServicesUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *DataMigrationService +} + +// ServicesUpdate ... +func (c StandardOperationClient) ServicesUpdate(ctx context.Context, id ServiceId, input DataMigrationService) (result ServicesUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// ServicesUpdateThenPoll performs ServicesUpdate then polls until it's completed +func (c StandardOperationClient) ServicesUpdateThenPoll(ctx context.Context, id ServiceId, input 
DataMigrationService) error { + result, err := c.ServicesUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing ServicesUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after ServicesUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetaskscreateorupdate.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetaskscreateorupdate.go new file mode 100644 index 00000000000..f85b2d9507d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetaskscreateorupdate.go @@ -0,0 +1,58 @@ +package standardoperation + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServiceTasksCreateOrUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// ServiceTasksCreateOrUpdate ... 
+func (c StandardOperationClient) ServiceTasksCreateOrUpdate(ctx context.Context, id ServiceTaskId, input ProjectTask) (result ServiceTasksCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetasksdelete.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetasksdelete.go new file mode 100644 index 00000000000..c0e8177b21a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetasksdelete.go @@ -0,0 +1,77 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServiceTasksDeleteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData +} + +type ServiceTasksDeleteOperationOptions struct { + DeleteRunningTasks *bool +} + +func DefaultServiceTasksDeleteOperationOptions() ServiceTasksDeleteOperationOptions { + return ServiceTasksDeleteOperationOptions{} +} + +func (o ServiceTasksDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ServiceTasksDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ServiceTasksDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.DeleteRunningTasks != nil { + out.Append("deleteRunningTasks", fmt.Sprintf("%v", *o.DeleteRunningTasks)) + } + return &out +} + +// ServiceTasksDelete ... +func (c StandardOperationClient) ServiceTasksDelete(ctx context.Context, id ServiceTaskId, options ServiceTasksDeleteOperationOptions) (result ServiceTasksDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetasksget.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetasksget.go new file mode 100644 index 00000000000..7576196eab7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetasksget.go @@ -0,0 +1,83 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + 
"github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServiceTasksGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +type ServiceTasksGetOperationOptions struct { + Expand *string +} + +func DefaultServiceTasksGetOperationOptions() ServiceTasksGetOperationOptions { + return ServiceTasksGetOperationOptions{} +} + +func (o ServiceTasksGetOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ServiceTasksGetOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ServiceTasksGetOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Expand != nil { + out.Append("$expand", fmt.Sprintf("%v", *o.Expand)) + } + return &out +} + +// ServiceTasksGet ... 
+func (c StandardOperationClient) ServiceTasksGet(ctx context.Context, id ServiceTaskId, options ServiceTasksGetOperationOptions) (result ServiceTasksGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetaskslist.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetaskslist.go new file mode 100644 index 00000000000..5095ff607db --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetaskslist.go @@ -0,0 +1,134 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServiceTasksListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]ProjectTask +} + +type ServiceTasksListCompleteResult struct { + LatestHttpResponse *http.Response + Items []ProjectTask +} + +type ServiceTasksListOperationOptions struct { + TaskType *string +} + +func DefaultServiceTasksListOperationOptions() ServiceTasksListOperationOptions { + return ServiceTasksListOperationOptions{} +} + +func (o ServiceTasksListOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ServiceTasksListOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ServiceTasksListOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.TaskType != nil { + out.Append("taskType", fmt.Sprintf("%v", *o.TaskType)) + } + return &out +} + +type ServiceTasksListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ServiceTasksListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ServiceTasksList ... 
+func (c StandardOperationClient) ServiceTasksList(ctx context.Context, id ServiceId, options ServiceTasksListOperationOptions) (result ServiceTasksListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Pager: &ServiceTasksListCustomPager{}, + Path: fmt.Sprintf("%s/serviceTasks", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]ProjectTask `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ServiceTasksListComplete retrieves all the results into a single object +func (c StandardOperationClient) ServiceTasksListComplete(ctx context.Context, id ServiceId, options ServiceTasksListOperationOptions) (ServiceTasksListCompleteResult, error) { + return c.ServiceTasksListCompleteMatchingPredicate(ctx, id, options, ProjectTaskOperationPredicate{}) +} + +// ServiceTasksListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c StandardOperationClient) ServiceTasksListCompleteMatchingPredicate(ctx context.Context, id ServiceId, options ServiceTasksListOperationOptions, predicate ProjectTaskOperationPredicate) (result ServiceTasksListCompleteResult, err error) { + items := make([]ProjectTask, 0) + + resp, err := c.ServiceTasksList(ctx, id, options) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = 
ServiceTasksListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetasksupdate.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetasksupdate.go new file mode 100644 index 00000000000..788fe892ca9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_servicetasksupdate.go @@ -0,0 +1,57 @@ +package standardoperation + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServiceTasksUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// ServiceTasksUpdate ... +func (c StandardOperationClient) ServiceTasksUpdate(ctx context.Context, id ServiceTaskId, input ProjectTask) (result ServiceTasksUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_taskscreateorupdate.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_taskscreateorupdate.go new file mode 100644 index 
00000000000..eb88253093d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_taskscreateorupdate.go @@ -0,0 +1,58 @@ +package standardoperation + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksCreateOrUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// TasksCreateOrUpdate ... +func (c StandardOperationClient) TasksCreateOrUpdate(ctx context.Context, id TaskId, input ProjectTask) (result TasksCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_tasksdelete.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_tasksdelete.go new file mode 100644 index 00000000000..a3d7a6f5597 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_tasksdelete.go @@ -0,0 +1,77 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright 
(c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksDeleteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData +} + +type TasksDeleteOperationOptions struct { + DeleteRunningTasks *bool +} + +func DefaultTasksDeleteOperationOptions() TasksDeleteOperationOptions { + return TasksDeleteOperationOptions{} +} + +func (o TasksDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o TasksDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o TasksDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.DeleteRunningTasks != nil { + out.Append("deleteRunningTasks", fmt.Sprintf("%v", *o.DeleteRunningTasks)) + } + return &out +} + +// TasksDelete ... +func (c StandardOperationClient) TasksDelete(ctx context.Context, id TaskId, options TasksDeleteOperationOptions) (result TasksDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_tasksget.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_tasksget.go new file mode 100644 index 00000000000..c5df301d16d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_tasksget.go @@ -0,0 +1,83 @@ +package standardoperation + +import ( + "context" + "fmt" + 
"net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +type TasksGetOperationOptions struct { + Expand *string +} + +func DefaultTasksGetOperationOptions() TasksGetOperationOptions { + return TasksGetOperationOptions{} +} + +func (o TasksGetOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o TasksGetOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o TasksGetOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Expand != nil { + out.Append("$expand", fmt.Sprintf("%v", *o.Expand)) + } + return &out +} + +// TasksGet ... +func (c StandardOperationClient) TasksGet(ctx context.Context, id TaskId, options TasksGetOperationOptions) (result TasksGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_taskslist.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_taskslist.go new file mode 100644 index 00000000000..7699272a86d --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/standardoperation/method_taskslist.go @@ -0,0 +1,134 @@ +package standardoperation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]ProjectTask +} + +type TasksListCompleteResult struct { + LatestHttpResponse *http.Response + Items []ProjectTask +} + +type TasksListOperationOptions struct { + TaskType *string +} + +func DefaultTasksListOperationOptions() TasksListOperationOptions { + return TasksListOperationOptions{} +} + +func (o TasksListOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o TasksListOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o TasksListOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.TaskType != nil { + out.Append("taskType", fmt.Sprintf("%v", *o.TaskType)) + } + return &out +} + +type TasksListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *TasksListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// TasksList ... 
+func (c StandardOperationClient) TasksList(ctx context.Context, id ProjectId, options TasksListOperationOptions) (result TasksListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Pager: &TasksListCustomPager{}, + Path: fmt.Sprintf("%s/tasks", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]ProjectTask `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// TasksListComplete retrieves all the results into a single object +func (c StandardOperationClient) TasksListComplete(ctx context.Context, id ProjectId, options TasksListOperationOptions) (TasksListCompleteResult, error) { + return c.TasksListCompleteMatchingPredicate(ctx, id, options, ProjectTaskOperationPredicate{}) +} + +// TasksListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c StandardOperationClient) TasksListCompleteMatchingPredicate(ctx context.Context, id ProjectId, options TasksListOperationOptions, predicate ProjectTaskOperationPredicate) (result TasksListCompleteResult, err error) { + items := make([]ProjectTask, 0) + + resp, err := c.TasksList(ctx, id, options) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = TasksListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git 
a/resource-manager/datamigration/2025-06-30/standardoperation/method_tasksupdate.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_tasksupdate.go new file mode 100644 index 00000000000..3d93993f5cd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_tasksupdate.go @@ -0,0 +1,57 @@ +package standardoperation + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// TasksUpdate ... +func (c StandardOperationClient) TasksUpdate(ctx context.Context, id TaskId, input ProjectTask) (result TasksUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/method_usageslist.go b/resource-manager/datamigration/2025-06-30/standardoperation/method_usageslist.go new file mode 100644 index 00000000000..016d5b4019b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/method_usageslist.go @@ -0,0 +1,105 @@ +package standardoperation + +import ( + "context" + "fmt" 
+ "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UsagesListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]Quota +} + +type UsagesListCompleteResult struct { + LatestHttpResponse *http.Response + Items []Quota +} + +type UsagesListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *UsagesListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// UsagesList ... +func (c StandardOperationClient) UsagesList(ctx context.Context, id LocationId) (result UsagesListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &UsagesListCustomPager{}, + Path: fmt.Sprintf("%s/usages", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]Quota `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// UsagesListComplete retrieves all the results into a single object +func (c StandardOperationClient) UsagesListComplete(ctx context.Context, id LocationId) (UsagesListCompleteResult, error) { + return c.UsagesListCompleteMatchingPredicate(ctx, id, QuotaOperationPredicate{}) +} + +// UsagesListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c StandardOperationClient) UsagesListCompleteMatchingPredicate(ctx 
context.Context, id LocationId, predicate QuotaOperationPredicate) (result UsagesListCompleteResult, err error) { + items := make([]Quota, 0) + + resp, err := c.UsagesList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = UsagesListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_availableservicesku.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_availableservicesku.go new file mode 100644 index 00000000000..ad7a9ec9ab3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_availableservicesku.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AvailableServiceSku struct { + Capacity *AvailableServiceSkuCapacity `json:"capacity,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` + Sku *AvailableServiceSkuSku `json:"sku,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_availableserviceskucapacity.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_availableserviceskucapacity.go new file mode 100644 index 00000000000..be631078696 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_availableserviceskucapacity.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AvailableServiceSkuCapacity struct { + Default *int64 `json:"default,omitempty"` + Maximum *int64 `json:"maximum,omitempty"` + Minimum *int64 `json:"minimum,omitempty"` + ScaleType *ServiceScalability `json:"scaleType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_availableserviceskusku.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_availableserviceskusku.go new file mode 100644 index 00000000000..4727b5cd679 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_availableserviceskusku.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AvailableServiceSkuSku struct { + Family *string `json:"family,omitempty"` + Name *string `json:"name,omitempty"` + Size *string `json:"size,omitempty"` + Tier *string `json:"tier,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_azureactivedirectoryapp.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_azureactivedirectoryapp.go new file mode 100644 index 00000000000..a7bc1caef39 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_azureactivedirectoryapp.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AzureActiveDirectoryApp struct { + AppKey *string `json:"appKey,omitempty"` + ApplicationId *string `json:"applicationId,omitempty"` + IgnoreAzurePermissions *bool `json:"ignoreAzurePermissions,omitempty"` + TenantId *string `json:"tenantId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_backupfileinfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_backupfileinfo.go new file mode 100644 index 00000000000..518c2f93101 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_backupfileinfo.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupFileInfo struct { + FamilySequenceNumber *int64 `json:"familySequenceNumber,omitempty"` + FileLocation *string `json:"fileLocation,omitempty"` + Status *BackupFileStatus `json:"status,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_backupsetinfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_backupsetinfo.go new file mode 100644 index 00000000000..33e7144bb5c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_backupsetinfo.go @@ -0,0 +1,59 @@ +package standardoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BackupSetInfo struct { + BackupFinishedDate *string `json:"backupFinishedDate,omitempty"` + BackupSetId *string `json:"backupSetId,omitempty"` + BackupStartDate *string `json:"backupStartDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FirstLsn *string `json:"firstLsn,omitempty"` + IsBackupRestored *bool `json:"isBackupRestored,omitempty"` + LastLsn *string `json:"lastLsn,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + ListOfBackupFiles *[]BackupFileInfo `json:"listOfBackupFiles,omitempty"` +} + +func (o *BackupSetInfo) GetBackupFinishedDateAsTime() (*time.Time, error) { + if o.BackupFinishedDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishedDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupFinishedDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishedDate = &formatted +} + +func (o *BackupSetInfo) GetBackupStartDateAsTime() (*time.Time, error) { + if o.BackupStartDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupStartDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupStartDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupStartDate = &formatted +} + +func (o *BackupSetInfo) GetLastModifiedTimeAsTime() (*time.Time, error) { + if o.LastModifiedTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastModifiedTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetLastModifiedTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModifiedTime = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_blobshare.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_blobshare.go new file mode 100644 index 00000000000..06a2a7e601c --- /dev/null 
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_blobshare.go @@ -0,0 +1,8 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BlobShare struct { + SasUri *string `json:"sasUri,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_commandproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_commandproperties.go new file mode 100644 index 00000000000..e355533004f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_commandproperties.go @@ -0,0 +1,85 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CommandProperties interface { + CommandProperties() BaseCommandPropertiesImpl +} + +var _ CommandProperties = BaseCommandPropertiesImpl{} + +type BaseCommandPropertiesImpl struct { + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s BaseCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s +} + +var _ CommandProperties = RawCommandPropertiesImpl{} + +// RawCommandPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawCommandPropertiesImpl struct { + commandProperties BaseCommandPropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s.commandProperties +} + +func UnmarshalCommandPropertiesImplementation(input []byte) (CommandProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling CommandProperties into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["commandType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureDbSqlMi.Complete") { + var out MigrateMISyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMISyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Sync.Complete.Database") { + var out MigrateSyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + var parent BaseCommandPropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseCommandPropertiesImpl: %+v", err) + } + + return RawCommandPropertiesImpl{ + commandProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connectioninfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connectioninfo.go new file mode 100644 index 00000000000..a817745d831 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connectioninfo.go @@ -0,0 +1,117 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectionInfo interface { + ConnectionInfo() BaseConnectionInfoImpl +} + +var _ ConnectionInfo = BaseConnectionInfoImpl{} + +type BaseConnectionInfoImpl struct { + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s BaseConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s +} + +var _ ConnectionInfo = RawConnectionInfoImpl{} + +// RawConnectionInfoImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectionInfoImpl struct { + connectionInfo BaseConnectionInfoImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s.connectionInfo +} + +func UnmarshalConnectionInfoImplementation(input []byte) (ConnectionInfo, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectionInfo into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["type"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MiSqlConnectionInfo") { + var out MiSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MiSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MongoDbConnectionInfo") { + var out MongoDbConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbConnectionInfo: %+v", err) + } + 
return out, nil + } + + if strings.EqualFold(value, "MySqlConnectionInfo") { + var out MySqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MySqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "OracleConnectionInfo") { + var out OracleConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into OracleConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "PostgreSqlConnectionInfo") { + var out PostgreSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into PostgreSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SqlConnectionInfo") { + var out SqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into SqlConnectionInfo: %+v", err) + } + return out, nil + } + + var parent BaseConnectionInfoImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectionInfoImpl: %+v", err) + } + + return RawConnectionInfoImpl{ + connectionInfo: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttomongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttomongodbtaskproperties.go new file mode 100644 index 00000000000..77ae0ddaf20 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttomongodbtaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToMongoDbTaskProperties{} + +type ConnectToMongoDbTaskProperties struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToMongoDbTaskProperties{} + +func (s ConnectToMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Connect.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToMongoDbTaskProperties{} + +func (s *ConnectToMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType 
`json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcemysqltaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcemysqltaskinput.go new file mode 100644 index 00000000000..19c92ecf821 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcemysqltaskinput.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceMySqlTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetPlatform *MySqlTargetPlatformType `json:"targetPlatform,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcemysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcemysqltaskproperties.go new file mode 100644 index 00000000000..b05f974633d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcemysqltaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToSourceMySqlTaskProperties{} + +type ConnectToSourceMySqlTaskProperties struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceMySqlTaskProperties{} + +func (s ConnectToSourceMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.MySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceMySqlTaskProperties{} + +func (s *ConnectToSourceMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError 
`json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcenonsqltaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcenonsqltaskoutput.go new file mode 100644 index 00000000000..7642d6e1dd8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcenonsqltaskoutput.go @@ -0,0 +1,12 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceNonSqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerProperties *ServerProperties `json:"serverProperties,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourceoraclesynctaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourceoraclesynctaskinput.go new file mode 100644 index 00000000000..862706dd33a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourceoraclesynctaskinput.go @@ -0,0 +1,8 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceOracleSyncTaskInput struct { + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourceoraclesynctaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourceoraclesynctaskoutput.go new file mode 100644 index 00000000000..c01f20e1519 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourceoraclesynctaskoutput.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceOracleSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourceoraclesynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourceoraclesynctaskproperties.go new file mode 100644 index 00000000000..2c760452960 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourceoraclesynctaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceOracleSyncTaskProperties{} + +type ConnectToSourceOracleSyncTaskProperties struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceOracleSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceOracleSyncTaskProperties{} + +func (s ConnectToSourceOracleSyncTaskProperties) MarshalJSON() ([]byte, error) { + 
type wrapper ConnectToSourceOracleSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.Oracle.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceOracleSyncTaskProperties{} + +func (s *ConnectToSourceOracleSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := 
UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceOracleSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcepostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcepostgresqlsynctaskinput.go new file mode 100644 index 00000000000..476a6f01951 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcepostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourcePostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcepostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcepostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..83b0fe4065a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcepostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourcePostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcepostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcepostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..90d79af181d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcepostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToSourcePostgreSqlSyncTaskProperties{} + +type ConnectToSourcePostgreSqlSyncTaskProperties struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourcePostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.PostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s *ConnectToSourcePostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output 
*[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourcePostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlserversynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlserversynctaskproperties.go new file mode 100644 index 00000000000..2bbf68f7d40 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlserversynctaskproperties.go @@ -0,0 +1,121 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerSyncTaskProperties{} + +type ConnectToSourceSqlServerSyncTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerSyncTaskProperties{} + +func (s ConnectToSourceSqlServerSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.SqlServer.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerSyncTaskProperties{} + +func (s *ConnectToSourceSqlServerSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput 
`json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskinput.go 
b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskinput.go new file mode 100644 index 00000000000..36171d1a176 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskinput.go @@ -0,0 +1,15 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceSqlServerTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectDatabases *bool `json:"collectDatabases,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + CollectTdeCertificateInfo *bool `json:"collectTdeCertificateInfo,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutput.go new file mode 100644 index 00000000000..e2ae1c203ae --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutput.go @@ -0,0 +1,100 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceSqlServerTaskOutput interface { + ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl +} + +var _ ConnectToSourceSqlServerTaskOutput = BaseConnectToSourceSqlServerTaskOutputImpl{} + +type BaseConnectToSourceSqlServerTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s +} + +var _ ConnectToSourceSqlServerTaskOutput = RawConnectToSourceSqlServerTaskOutputImpl{} + +// RawConnectToSourceSqlServerTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectToSourceSqlServerTaskOutputImpl struct { + connectToSourceSqlServerTaskOutput BaseConnectToSourceSqlServerTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s.connectToSourceSqlServerTaskOutput +} + +func UnmarshalConnectToSourceSqlServerTaskOutputImplementation(input []byte) (ConnectToSourceSqlServerTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TaskLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputTaskLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + return out, nil + } + + var parent BaseConnectToSourceSqlServerTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectToSourceSqlServerTaskOutputImpl: %+v", err) + } + + return RawConnectToSourceSqlServerTaskOutputImpl{ + connectToSourceSqlServerTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutputagentjoblevel.go new file mode 100644 index 00000000000..77befc58090 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +type ConnectToSourceSqlServerTaskOutputAgentJobLevel struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + JobCategory *string `json:"jobCategory,omitempty"` + JobOwner *string `json:"jobOwner,omitempty"` + LastExecutedOn *string `json:"lastExecutedOn,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutputdatabaselevel.go new file mode 100644 index 00000000000..2b055019588 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutputdatabaselevel.go @@ -0,0 +1,56 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +type ConnectToSourceSqlServerTaskOutputDatabaseLevel struct { + CompatibilityLevel *DatabaseCompatLevel `json:"compatibilityLevel,omitempty"` + DatabaseFiles *[]DatabaseFileInfo `json:"databaseFiles,omitempty"` + DatabaseState *DatabaseState `json:"databaseState,omitempty"` + Name *string `json:"name,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutputloginlevel.go new file mode 100644 index 00000000000..78b9b9875d0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutputloginlevel.go @@ -0,0 +1,56 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +type ConnectToSourceSqlServerTaskOutputLoginLevel struct { + DefaultDatabase *string `json:"defaultDatabase,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + LoginType *LoginType `json:"loginType,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputLoginLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutputtasklevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutputtasklevel.go new file mode 100644 index 00000000000..f799468e0f7 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskoutputtasklevel.go @@ -0,0 +1,58 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +type ConnectToSourceSqlServerTaskOutputTaskLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + DatabaseTdeCertificateMapping *map[string]string `json:"databaseTdeCertificateMapping,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputTaskLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + 
decoded["resultType"] = "TaskLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskproperties.go new file mode 100644 index 00000000000..2b656a5927c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttosourcesqlservertaskproperties.go @@ -0,0 +1,124 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerTaskProperties{} + +type ConnectToSourceSqlServerTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskProperties{} + +func (s ConnectToSourceSqlServerTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskProperties + wrapped := 
wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.SqlServer" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerTaskProperties{} + +func (s *ConnectToSourceSqlServerTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 
'Commands' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbformysqltaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbformysqltaskinput.go new file mode 100644 index 00000000000..ace097e00bb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbformysqltaskinput.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForMySqlTaskInput struct { + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbformysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbformysqltaskoutput.go new file mode 100644 index 00000000000..89b29e3317e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbformysqltaskoutput.go @@ -0,0 +1,12 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForMySqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbformysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbformysqltaskproperties.go new file mode 100644 index 00000000000..21c79475100 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbformysqltaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetAzureDbForMySqlTaskProperties{} + +type ConnectToTargetAzureDbForMySqlTaskProperties struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s *ConnectToTargetAzureDbForMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + 
Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..85a5461635c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..b497d1e196b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..cdce53a033d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s 
*ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..da60da9dfb5 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput struct { + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..8c77e08cf36 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput struct { + DatabaseSchemaMap *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined `json:"databaseSchemaMap,omitempty"` + Databases *[]string `json:"databases,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go new file mode 100644 index 00000000000..be334e4e14f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined struct { + Database *string `json:"database,omitempty"` + Schemas *[]string `json:"schemas,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..20a4cc4b33d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s 
ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := 
temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqldbtaskinput.go new file mode 100644 index 00000000000..3683ae46162 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqldbtaskinput.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlDbTaskInput struct { + QueryObjectCounts *bool `json:"queryObjectCounts,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqldbtaskoutput.go new file mode 100644 index 00000000000..04d0da5f903 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqldbtaskoutput.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlDbTaskOutput struct { + Databases *map[string]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqldbtaskproperties.go new file mode 100644 index 00000000000..bfa1ff00797 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqldbtaskproperties.go @@ -0,0 +1,109 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlDbTaskProperties{} + +type ConnectToTargetSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlDbTaskProperties{} + +func (s ConnectToTargetSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper 
ConnectToTargetSqlDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlDbTaskProperties{} + +func (s *ConnectToTargetSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + 
impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmisynctaskinput.go new file mode 100644 index 00000000000..a23f3362a3f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmisynctaskinput.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmisynctaskoutput.go new file mode 100644 index 00000000000..420fd2fa103 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlMISyncTaskOutput struct { + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmisynctaskproperties.go new file mode 100644 index 00000000000..634ff043c8e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmisynctaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlMISyncTaskProperties{} + +type ConnectToTargetSqlMISyncTaskProperties struct { + Input *ConnectToTargetSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlMISyncTaskProperties{} + +func (s ConnectToTargetSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlMISyncTaskProperties + wrapped 
:= wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlMISyncTaskProperties{} + +func (s *ConnectToTargetSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + 
return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmitaskinput.go new file mode 100644 index 00000000000..cad65277742 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmitaskinput.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlMITaskInput struct { + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmitaskoutput.go new file mode 100644 index 00000000000..4b1cd928983 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmitaskoutput.go @@ -0,0 +1,13 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlMITaskOutput struct { + AgentJobs *[]string `json:"agentJobs,omitempty"` + Id *string `json:"id,omitempty"` + Logins *[]string `json:"logins,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmitaskproperties.go new file mode 100644 index 00000000000..a00c46c22b7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlmitaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetSqlMITaskProperties{} + +type ConnectToTargetSqlMITaskProperties struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlMITaskProperties{} + +func (s ConnectToTargetSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlMITaskProperties{} + +func (s *ConnectToTargetSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlsqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlsqldbsynctaskinput.go new file mode 100644 index 00000000000..373008bae1a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlsqldbsynctaskinput.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlSqlDbSyncTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlsqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlsqldbsynctaskproperties.go new file mode 100644 index 00000000000..15faf58d28f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_connecttotargetsqlsqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +type ConnectToTargetSqlSqlDbSyncTaskProperties struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + 
if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s *ConnectToTargetSqlSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 
'Commands' for 'ConnectToTargetSqlSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_databasebackupinfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_databasebackupinfo.go new file mode 100644 index 00000000000..74d6e69c67f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_databasebackupinfo.go @@ -0,0 +1,33 @@ +package standardoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseBackupInfo struct { + BackupFiles *[]string `json:"backupFiles,omitempty"` + BackupFinishDate *string `json:"backupFinishDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FamilyCount *int64 `json:"familyCount,omitempty"` + IsCompressed *bool `json:"isCompressed,omitempty"` + IsDamaged *bool `json:"isDamaged,omitempty"` + Position *int64 `json:"position,omitempty"` +} + +func (o *DatabaseBackupInfo) GetBackupFinishDateAsTime() (*time.Time, error) { + if o.BackupFinishDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseBackupInfo) SetBackupFinishDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishDate = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_databasefileinfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_databasefileinfo.go new file mode 100644 index 00000000000..1962a790a06 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_databasefileinfo.go @@ -0,0 
+1,14 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseFileInfo struct { + DatabaseName *string `json:"databaseName,omitempty"` + FileType *DatabaseFileType `json:"fileType,omitempty"` + Id *string `json:"id,omitempty"` + LogicalName *string `json:"logicalName,omitempty"` + PhysicalFullName *string `json:"physicalFullName,omitempty"` + RestoreFullName *string `json:"restoreFullName,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_databaseinfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_databaseinfo.go new file mode 100644 index 00000000000..f735c8fad1c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_databaseinfo.go @@ -0,0 +1,8 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseInfo struct { + SourceDatabaseName string `json:"sourceDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_databasesummaryresult.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_databasesummaryresult.go new file mode 100644 index 00000000000..88262ee7a3c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_databasesummaryresult.go @@ -0,0 +1,47 @@ +package standardoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_databasetable.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_databasetable.go new file mode 100644 index 00000000000..e7e5af5c618 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_databasetable.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseTable struct { + HasRows *bool `json:"hasRows,omitempty"` + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_dataintegrityvalidationresult.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_dataintegrityvalidationresult.go new file mode 100644 index 00000000000..c02765b9a1c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_dataintegrityvalidationresult.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataIntegrityValidationResult struct { + FailedObjects *map[string]string `json:"failedObjects,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_dataitemmigrationsummaryresult.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_dataitemmigrationsummaryresult.go new file mode 100644 index 00000000000..c52e2bac8a4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_dataitemmigrationsummaryresult.go @@ -0,0 +1,46 @@ +package standardoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataItemMigrationSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DataItemMigrationSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DataItemMigrationSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_datamigrationservice.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_datamigrationservice.go new file mode 100644 index 00000000000..1229d665e3c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_datamigrationservice.go @@ -0,0 +1,21 @@ +package standardoperation + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataMigrationService struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Kind *string `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DataMigrationServiceProperties `json:"properties,omitempty"` + Sku *ServiceSku `json:"sku,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_datamigrationserviceproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_datamigrationserviceproperties.go new file mode 100644 index 00000000000..0bd3c4466ab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_datamigrationserviceproperties.go @@ -0,0 +1,13 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataMigrationServiceProperties struct { + AutoStopDelay *string `json:"autoStopDelay,omitempty"` + DeleteResourcesOnStop *bool `json:"deleteResourcesOnStop,omitempty"` + ProvisioningState *ServiceProvisioningState `json:"provisioningState,omitempty"` + PublicKey *string `json:"publicKey,omitempty"` + VirtualNicId *string `json:"virtualNicId,omitempty"` + VirtualSubnetId *string `json:"virtualSubnetId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_executionstatistics.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_executionstatistics.go new file mode 100644 index 00000000000..7f4c6916f00 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_executionstatistics.go @@ -0,0 +1,13 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ExecutionStatistics struct { + CpuTimeMs *float64 `json:"cpuTimeMs,omitempty"` + ElapsedTimeMs *float64 `json:"elapsedTimeMs,omitempty"` + ExecutionCount *int64 `json:"executionCount,omitempty"` + HasErrors *bool `json:"hasErrors,omitempty"` + SqlErrors *[]string `json:"sqlErrors,omitempty"` + WaitStats *map[string]WaitStatistics `json:"waitStats,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_fileshare.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_fileshare.go new file mode 100644 index 00000000000..b8843a2af5a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_fileshare.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FileShare struct { + Password *string `json:"password,omitempty"` + Path string `json:"path"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_filestorageinfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_filestorageinfo.go new file mode 100644 index 00000000000..98cfadd27a6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_filestorageinfo.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type FileStorageInfo struct { + Headers *map[string]string `json:"headers,omitempty"` + Uri *string `json:"uri,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_gettdecertificatessqltaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_gettdecertificatessqltaskinput.go new file mode 100644 index 00000000000..061c2440476 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_gettdecertificatessqltaskinput.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetTdeCertificatesSqlTaskInput struct { + BackupFileShare FileShare `json:"backupFileShare"` + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + SelectedCertificates []SelectedCertificateInput `json:"selectedCertificates"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_gettdecertificatessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_gettdecertificatessqltaskoutput.go new file mode 100644 index 00000000000..0f1e7ead922 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_gettdecertificatessqltaskoutput.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetTdeCertificatesSqlTaskOutput struct { + Base64EncodedCertificates *map[string][]string `json:"base64EncodedCertificates,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_gettdecertificatessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_gettdecertificatessqltaskproperties.go new file mode 100644 index 00000000000..ae91b227435 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_gettdecertificatessqltaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = GetTdeCertificatesSqlTaskProperties{} + +type GetTdeCertificatesSqlTaskProperties struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetTdeCertificatesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetTdeCertificatesSqlTaskProperties{} + +func (s GetTdeCertificatesSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetTdeCertificatesSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetTDECertificates.Sql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetTdeCertificatesSqlTaskProperties{} + +func (s *GetTdeCertificatesSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetTdeCertificatesSqlTaskProperties': %+v", i, err) + } + output = 
append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesmysqltaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesmysqltaskinput.go new file mode 100644 index 00000000000..171350ca48f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesmysqltaskinput.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesMySqlTaskInput struct { + ConnectionInfo MySqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesmysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesmysqltaskoutput.go new file mode 100644 index 00000000000..f279f506d9f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesmysqltaskoutput.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesMySqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesmysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesmysqltaskproperties.go new file mode 100644 index 00000000000..27a281f7e26 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesmysqltaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = GetUserTablesMySqlTaskProperties{} + +type GetUserTablesMySqlTaskProperties struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesMySqlTaskProperties{} + +func (s GetUserTablesMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesMySqlTaskProperties{} + +func (s *GetUserTablesMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + 
s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesoracletaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesoracletaskinput.go new file mode 100644 index 00000000000..91f7eb9ea91 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesoracletaskinput.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesOracleTaskInput struct { + ConnectionInfo OracleConnectionInfo `json:"connectionInfo"` + SelectedSchemas []string `json:"selectedSchemas"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesoracletaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesoracletaskoutput.go new file mode 100644 index 00000000000..2bb2359fd2a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesoracletaskoutput.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesOracleTaskOutput struct { + SchemaName *string `json:"schemaName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesoracletaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesoracletaskproperties.go new file mode 100644 index 00000000000..0f6d2d9e897 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablesoracletaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = GetUserTablesOracleTaskProperties{} + +type GetUserTablesOracleTaskProperties struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesOracleTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesOracleTaskProperties{} + +func (s GetUserTablesOracleTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesOracleTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, 
fmt.Errorf("marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesOracle" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesOracleTaskProperties{} + +func (s *GetUserTablesOracleTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesOracleTaskProperties': %+v", i, err) + } + output = append(output, impl) 
+ } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablespostgresqltaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablespostgresqltaskinput.go new file mode 100644 index 00000000000..5eeac1b25e2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablespostgresqltaskinput.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesPostgreSqlTaskInput struct { + ConnectionInfo PostgreSqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablespostgresqltaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablespostgresqltaskoutput.go new file mode 100644 index 00000000000..98e2b160918 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablespostgresqltaskoutput.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesPostgreSqlTaskOutput struct { + DatabaseName *string `json:"databaseName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablespostgresqltaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablespostgresqltaskproperties.go new file mode 100644 index 00000000000..30126301e71 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablespostgresqltaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = GetUserTablesPostgreSqlTaskProperties{} + +type GetUserTablesPostgreSqlTaskProperties struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesPostgreSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesPostgreSqlTaskProperties{} + +func (s GetUserTablesPostgreSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesPostgreSqlTaskProperties + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesPostgreSql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesPostgreSqlTaskProperties{} + +func (s *GetUserTablesPostgreSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' 
for 'GetUserTablesPostgreSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqlsynctaskinput.go new file mode 100644 index 00000000000..987e63ff3a8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqlsynctaskinput.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlSyncTaskInput struct { + SelectedSourceDatabases []string `json:"selectedSourceDatabases"` + SelectedTargetDatabases []string `json:"selectedTargetDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqlsynctaskoutput.go new file mode 100644 index 00000000000..bd9ab2763e7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqlsynctaskoutput.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlSyncTaskOutput struct { + DatabasesToSourceTables *map[string][]DatabaseTable `json:"databasesToSourceTables,omitempty"` + DatabasesToTargetTables *map[string][]DatabaseTable `json:"databasesToTargetTables,omitempty"` + TableValidationErrors *map[string][]string `json:"tableValidationErrors,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqlsynctaskproperties.go new file mode 100644 index 00000000000..9e9b4cff4f4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlSyncTaskProperties{} + +type GetUserTablesSqlSyncTaskProperties struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlSyncTaskProperties{} + +func (s GetUserTablesSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlSyncTaskProperties{} + +func (s *GetUserTablesSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqltaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqltaskinput.go new file mode 100644 index 00000000000..e8928300697 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqltaskinput.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlTaskInput struct { + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqltaskoutput.go new file mode 100644 index 00000000000..2b93a6c1d77 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqltaskoutput.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqltaskproperties.go new file mode 100644 index 00000000000..a3307a41874 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_getusertablessqltaskproperties.go @@ -0,0 +1,109 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+
+var _ ProjectTaskProperties = GetUserTablesSqlTaskProperties{}
+
+// GetUserTablesSqlTaskProperties is the ProjectTaskProperties variant whose
+// taskType discriminator is "GetUserTables.Sql" (injected by MarshalJSON).
+type GetUserTablesSqlTaskProperties struct {
+	Input  *GetUserTablesSqlTaskInput    `json:"input,omitempty"`
+	Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"`
+	TaskId *string                       `json:"taskId,omitempty"`
+
+	// Fields inherited from ProjectTaskProperties
+
+	ClientData *map[string]string   `json:"clientData,omitempty"`
+	Commands   *[]CommandProperties `json:"commands,omitempty"`
+	Errors     *[]ODataError        `json:"errors,omitempty"`
+	State      *TaskState           `json:"state,omitempty"`
+	TaskType   TaskType             `json:"taskType"`
+}
+
+// ProjectTaskProperties returns the shared base-type view of this variant.
+func (s GetUserTablesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
+	return BaseProjectTaskPropertiesImpl{
+		ClientData: s.ClientData,
+		Commands:   s.Commands,
+		Errors:     s.Errors,
+		State:      s.State,
+		TaskType:   s.TaskType,
+	}
+}
+
+var _ json.Marshaler = GetUserTablesSqlTaskProperties{}
+
+// MarshalJSON marshals through a local alias type (so this method is not
+// invoked recursively), then round-trips via a map to force taskType to the
+// constant discriminator value before re-encoding.
+func (s GetUserTablesSqlTaskProperties) MarshalJSON() ([]byte, error) {
+	type wrapper GetUserTablesSqlTaskProperties
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling GetUserTablesSqlTaskProperties: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties: %+v", err)
+	}
+
+	decoded["taskType"] = "GetUserTables.Sql"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling GetUserTablesSqlTaskProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
+
+var _ json.Unmarshaler = &GetUserTablesSqlTaskProperties{}
+
+// UnmarshalJSON decodes the plainly-typed fields directly, then re-reads the
+// raw payload so each element of "commands" (an interface-typed slice) can be
+// unmarshaled polymorphically via its own discriminator.
+func (s *GetUserTablesSqlTaskProperties) UnmarshalJSON(bytes []byte) error {
+	var decoded struct {
+		Input      *GetUserTablesSqlTaskInput    `json:"input,omitempty"`
+		Output     *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"`
+		TaskId     *string                       `json:"taskId,omitempty"`
+		ClientData *map[string]string            `json:"clientData,omitempty"`
+		Errors     *[]ODataError                 `json:"errors,omitempty"`
+		State      *TaskState                    `json:"state,omitempty"`
+		TaskType   TaskType                      `json:"taskType"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.Input = decoded.Input
+	s.Output = decoded.Output
+	s.TaskId = decoded.TaskId
+	s.ClientData = decoded.ClientData
+	s.Errors = decoded.Errors
+	s.State = decoded.State
+	s.TaskType = decoded.TaskType
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["commands"]; ok {
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]CommandProperties, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalCommandPropertiesImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Commands = &output
+	}
+
+	return nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemisynccompletecommandinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemisynccompletecommandinput.go
new file mode 100644
index 00000000000..f03114ed088
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemisynccompletecommandinput.go
@@ -0,0 +1,8 @@
+package standardoperation
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrateMISyncCompleteCommandInput is the input for the MI sync-complete
+// command; the source database name is required (no omitempty).
+type MigrateMISyncCompleteCommandInput struct {
+	SourceDatabaseName string `json:"sourceDatabaseName"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemisynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemisynccompletecommandoutput.go
new file mode 100644
index 00000000000..297b62ca860
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemisynccompletecommandoutput.go
@@ -0,0 +1,8 @@
+package standardoperation
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrateMISyncCompleteCommandOutput is the result of the MI sync-complete
+// command: only a list of errors, empty/absent on success.
+type MigrateMISyncCompleteCommandOutput struct {
+	Errors *[]ReportableException `json:"errors,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemisynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemisynccompletecommandproperties.go
new file mode 100644
index 00000000000..35d624d6ac0
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemisynccompletecommandproperties.go
@@ -0,0 +1,55 @@
+package standardoperation
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ CommandProperties = MigrateMISyncCompleteCommandProperties{}
+
+// MigrateMISyncCompleteCommandProperties is the CommandProperties variant
+// whose commandType discriminator is
+// "Migrate.SqlServer.AzureDbSqlMi.Complete" (injected by MarshalJSON).
+type MigrateMISyncCompleteCommandProperties struct {
+	Input  *MigrateMISyncCompleteCommandInput  `json:"input,omitempty"`
+	Output *MigrateMISyncCompleteCommandOutput `json:"output,omitempty"`
+
+	// Fields inherited from CommandProperties
+
+	CommandType CommandType    `json:"commandType"`
+	Errors      *[]ODataError  `json:"errors,omitempty"`
+	State       *CommandState  `json:"state,omitempty"`
+}
+
+// CommandProperties returns the shared base-type view of this variant.
+func (s MigrateMISyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl {
+	return BaseCommandPropertiesImpl{
+		CommandType: s.CommandType,
+		Errors:      s.Errors,
+		State:       s.State,
+	}
+}
+
+var _ json.Marshaler = MigrateMISyncCompleteCommandProperties{}
+
+// MarshalJSON marshals through a local alias type (avoiding recursion into
+// this method), then forces commandType to the constant discriminator.
+func (s MigrateMISyncCompleteCommandProperties) MarshalJSON() ([]byte, error) {
+	type wrapper MigrateMISyncCompleteCommandProperties
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMISyncCompleteCommandProperties: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMISyncCompleteCommandProperties: %+v", err)
+	}
+
+	decoded["commandType"] = "Migrate.SqlServer.AzureDbSqlMi.Complete"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMISyncCompleteCommandProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemongodbtaskproperties.go
new file mode 100644
index 00000000000..40ea2b1733a
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemongodbtaskproperties.go
@@ -0,0 +1,121 @@
+package standardoperation
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft
Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ ProjectTaskProperties = MigrateMongoDbTaskProperties{}
+
+// MigrateMongoDbTaskProperties is the ProjectTaskProperties variant whose
+// taskType discriminator is "Migrate.MongoDb" (injected by MarshalJSON).
+// Output elements are interface-typed (MongoDbProgress), so UnmarshalJSON
+// decodes them polymorphically.
+type MigrateMongoDbTaskProperties struct {
+	Input  *MongoDbMigrationSettings `json:"input,omitempty"`
+	Output *[]MongoDbProgress        `json:"output,omitempty"`
+
+	// Fields inherited from ProjectTaskProperties
+
+	ClientData *map[string]string   `json:"clientData,omitempty"`
+	Commands   *[]CommandProperties `json:"commands,omitempty"`
+	Errors     *[]ODataError        `json:"errors,omitempty"`
+	State      *TaskState           `json:"state,omitempty"`
+	TaskType   TaskType             `json:"taskType"`
+}
+
+// ProjectTaskProperties returns the shared base-type view of this variant.
+func (s MigrateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
+	return BaseProjectTaskPropertiesImpl{
+		ClientData: s.ClientData,
+		Commands:   s.Commands,
+		Errors:     s.Errors,
+		State:      s.State,
+		TaskType:   s.TaskType,
+	}
+}
+
+var _ json.Marshaler = MigrateMongoDbTaskProperties{}
+
+// MarshalJSON marshals through a local alias type (avoiding recursion into
+// this method), then forces taskType to the constant discriminator.
+func (s MigrateMongoDbTaskProperties) MarshalJSON() ([]byte, error) {
+	type wrapper MigrateMongoDbTaskProperties
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMongoDbTaskProperties: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties: %+v", err)
+	}
+
+	decoded["taskType"] = "Migrate.MongoDb"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMongoDbTaskProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
+
+var _ json.Unmarshaler = &MigrateMongoDbTaskProperties{}
+
+// UnmarshalJSON decodes the plainly-typed fields directly, then re-reads the
+// raw payload to polymorphically unmarshal each element of "commands" and
+// "output" (both interface-typed slices) via their own discriminators.
+func (s *MigrateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error {
+	var decoded struct {
+		Input      *MongoDbMigrationSettings `json:"input,omitempty"`
+		ClientData *map[string]string        `json:"clientData,omitempty"`
+		Errors     *[]ODataError             `json:"errors,omitempty"`
+		State      *TaskState                `json:"state,omitempty"`
+		TaskType   TaskType                  `json:"taskType"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.Input = decoded.Input
+	s.ClientData = decoded.ClientData
+	s.Errors = decoded.Errors
+	s.State = decoded.State
+	s.TaskType = decoded.TaskType
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["commands"]; ok {
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]CommandProperties, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalCommandPropertiesImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMongoDbTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Commands = &output
+	}
+
+	if v, ok := temp["output"]; ok {
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]MongoDbProgress, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalMongoDbProgressImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMongoDbTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Output = &output
+	}
+
+	return nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinedatabaseinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinedatabaseinput.go
new file mode 100644
index 00000000000..908f8fa98aa
--- /dev/null
+++ 
b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinedatabaseinput.go
@@ -0,0 +1,10 @@
+package standardoperation
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrateMySqlAzureDbForMySqlOfflineDatabaseInput selects one database for
+// the offline MySQL migration task: its name, the target database name, and
+// an optional source-to-target table mapping. All fields are optional.
+type MigrateMySqlAzureDbForMySqlOfflineDatabaseInput struct {
+	Name               *string            `json:"name,omitempty"`
+	TableMap           *map[string]string `json:"tableMap,omitempty"`
+	TargetDatabaseName *string            `json:"targetDatabaseName,omitempty"`
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskinput.go
new file mode 100644
index 00000000000..72c425cce58
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskinput.go
@@ -0,0 +1,32 @@
+package standardoperation
+
+import (
+	"time"
+
+	"github.com/hashicorp/go-azure-helpers/lang/dates"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrateMySqlAzureDbForMySqlOfflineTaskInput is the input for the offline
+// MySQL -> Azure DB for MySQL migration task. SelectedDatabases and both
+// connection infos are required (no omitempty); StartedOn is kept as a raw
+// string and converted by the Get/Set helpers below.
+type MigrateMySqlAzureDbForMySqlOfflineTaskInput struct {
+	EncryptedKeyForSecureFields *string                                           `json:"encryptedKeyForSecureFields,omitempty"`
+	MakeSourceServerReadOnly    *bool                                             `json:"makeSourceServerReadOnly,omitempty"`
+	OptionalAgentSettings       *map[string]string                                `json:"optionalAgentSettings,omitempty"`
+	SelectedDatabases           []MigrateMySqlAzureDbForMySqlOfflineDatabaseInput `json:"selectedDatabases"`
+	SourceConnectionInfo        MySqlConnectionInfo                               `json:"sourceConnectionInfo"`
+	StartedOn                   *string                                           `json:"startedOn,omitempty"`
+	TargetConnectionInfo        MySqlConnectionInfo                               `json:"targetConnectionInfo"`
+}
+
+// GetStartedOnAsTime parses StartedOn using the RFC 3339 layout
+// ("2006-01-02T15:04:05Z07:00" == time.RFC3339); returns (nil, nil) when unset.
+func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) GetStartedOnAsTime() (*time.Time, error) {
+	if o.StartedOn == nil {
+		return nil, nil
+	}
+	return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00")
+}
+
+// SetStartedOnAsTime stores input in StartedOn using the same RFC 3339 layout.
+func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) SetStartedOnAsTime(input time.Time) {
+	formatted := input.Format("2006-01-02T15:04:05Z07:00")
+	o.StartedOn = &formatted
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutput.go
new file mode 100644
index 00000000000..c57c28dcc05
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutput.go
@@ -0,0 +1,100 @@
+package standardoperation
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// MigrateMySqlAzureDbForMySqlOfflineTaskOutput is the discriminated-union
+// interface for offline MySQL task output; the "resultType" JSON field
+// selects the concrete variant (see the Unmarshal... function below).
+type MigrateMySqlAzureDbForMySqlOfflineTaskOutput interface {
+	MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl
+}
+
+var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{}
+
+// BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl holds the fields every
+// variant shares: the entry id and the resultType discriminator.
+type BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct {
+	Id         *string `json:"id,omitempty"`
+	ResultType string  `json:"resultType"`
+}
+
+func (s BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl {
+	return s
+}
+
+var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{}
+
+// RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types
+// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
+// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
+type RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct {
+	migrateMySqlAzureDbForMySqlOfflineTaskOutput BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl
+	Type                                         string
+	Values                                       map[string]interface{}
+}
+
+func (s RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl {
+	return s.migrateMySqlAzureDbForMySqlOfflineTaskOutput
+}
+
+// UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation picks
+// the concrete variant from the "resultType" discriminator (case-insensitive
+// comparison). Unknown values fall back to the Raw...Impl wrapper; nil input
+// yields (nil, nil).
+func UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlOfflineTaskOutput, error) {
+	if input == nil {
+		return nil, nil
+	}
+
+	var temp map[string]interface{}
+	if err := json.Unmarshal(input, &temp); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutput into map[string]interface: %+v", err)
+	}
+
+	var value string
+	if v, ok := temp["resultType"]; ok {
+		value = fmt.Sprintf("%v", v)
+	}
+
+	if strings.EqualFold(value, "DatabaseLevelOutput") {
+		var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err)
+		}
+		return out, nil
+	}
+
+	if strings.EqualFold(value, "ErrorOutput") {
+		var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputError
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err)
+		}
+		return out, nil
+	}
+
+	if strings.EqualFold(value, "MigrationLevelOutput") {
+		var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err)
+		}
+		return out, nil
+	}
+
+	if strings.EqualFold(value, "TableLevelOutput") {
+		var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel
+		if err := json.Unmarshal(input, &out); err != nil {
+			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err)
+		}
+		return out, nil
+	}
+
+	var parent BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl
+	if err := json.Unmarshal(input, &parent); err != nil {
+		return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl: %+v", err)
+	}
+
+	return RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{
+		migrateMySqlAzureDbForMySqlOfflineTaskOutput: parent,
+		Type:   value,
+		Values: temp,
+	}, nil
+
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go
new file mode 100644
index 00000000000..a8899078ed1
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go
@@ -0,0 +1,66 @@
+package standardoperation
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{}
+
+// MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel is the
+// per-database progress variant; its resultType discriminator is
+// "DatabaseLevelOutput" (injected by MarshalJSON).
+type MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel struct {
+	DatabaseName             *string                                     `json:"databaseName,omitempty"`
+	EndedOn                  *string                                     `json:"endedOn,omitempty"`
+	ErrorCount               *int64                                      `json:"errorCount,omitempty"`
+	ErrorPrefix              *string                                     `json:"errorPrefix,omitempty"`
+	ExceptionsAndWarnings    *[]ReportableException                      `json:"exceptionsAndWarnings,omitempty"`
+	LastStorageUpdate        *string                                     `json:"lastStorageUpdate,omitempty"`
+	Message                  *string                                     `json:"message,omitempty"`
+	NumberOfObjects          *int64                                      `json:"numberOfObjects,omitempty"`
+	NumberOfObjectsCompleted *int64                                      `json:"numberOfObjectsCompleted,omitempty"`
+	ObjectSummary            *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"`
+	ResultPrefix             *string                                     `json:"resultPrefix,omitempty"`
+	Stage                    *DatabaseMigrationStage                     `json:"stage,omitempty"`
+	StartedOn                *string                                     `json:"startedOn,omitempty"`
+	State                    *MigrationState                             `json:"state,omitempty"`
+	StatusMessage            *string                                     `json:"statusMessage,omitempty"`
+
+	// Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput
+
+	Id         *string `json:"id,omitempty"`
+	ResultType string  `json:"resultType"`
+}
+
+// MigrateMySqlAzureDbForMySqlOfflineTaskOutput returns the shared base-type
+// view of this variant.
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl {
+	return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{
+		Id:         s.Id,
+		ResultType: s.ResultType,
+	}
+}
+
+var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{}
+
+// MarshalJSON marshals through a local alias type (avoiding recursion into
+// this method), then forces resultType to the constant discriminator.
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) {
+	type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err)
+	}
+
+	decoded["resultType"] = "DatabaseLevelOutput"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go
new file mode 100644
index 00000000000..2ef815bd2cc
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go
@@ -0,0 +1,52 @@
+package standardoperation
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{}
+
+// MigrateMySqlAzureDbForMySqlOfflineTaskOutputError is the error-report
+// variant; its resultType discriminator is "ErrorOutput" (injected by
+// MarshalJSON).
+type MigrateMySqlAzureDbForMySqlOfflineTaskOutputError struct {
+	Error *ReportableException `json:"error,omitempty"`
+
+	// Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput
+
+	Id         *string `json:"id,omitempty"`
+	ResultType string  `json:"resultType"`
+}
+
+// MigrateMySqlAzureDbForMySqlOfflineTaskOutput returns the shared base-type
+// view of this variant.
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl {
+	return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{
+		Id:         s.Id,
+		ResultType: s.ResultType,
+	}
+}
+
+var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{}
+
+// MarshalJSON marshals through a local alias type (avoiding recursion into
+// this method), then forces resultType to the constant discriminator.
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MarshalJSON() ([]byte, error) {
+	type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputError
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err)
+	}
+
+	decoded["resultType"] = "ErrorOutput"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go
new file mode 100644
index 00000000000..2a1e2353bc3
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go
@@ -0,0
+1,66 @@
+package standardoperation
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{}
+
+// MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel is the
+// whole-migration summary variant; its resultType discriminator is
+// "MigrationLevelOutput" (injected by MarshalJSON).
+type MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel struct {
+	DatabaseSummary          *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"`
+	Databases                *map[string]string                `json:"databases,omitempty"`
+	DurationInSeconds        *int64                            `json:"durationInSeconds,omitempty"`
+	EndedOn                  *string                           `json:"endedOn,omitempty"`
+	ExceptionsAndWarnings    *[]ReportableException            `json:"exceptionsAndWarnings,omitempty"`
+	LastStorageUpdate        *string                           `json:"lastStorageUpdate,omitempty"`
+	Message                  *string                           `json:"message,omitempty"`
+	MigrationReportResult    *MigrationReportResult            `json:"migrationReportResult,omitempty"`
+	SourceServerBrandVersion *string                           `json:"sourceServerBrandVersion,omitempty"`
+	SourceServerVersion      *string                           `json:"sourceServerVersion,omitempty"`
+	StartedOn                *string                           `json:"startedOn,omitempty"`
+	Status                   *MigrationStatus                  `json:"status,omitempty"`
+	StatusMessage            *string                           `json:"statusMessage,omitempty"`
+	TargetServerBrandVersion *string                           `json:"targetServerBrandVersion,omitempty"`
+	TargetServerVersion      *string                           `json:"targetServerVersion,omitempty"`
+
+	// Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput
+
+	Id         *string `json:"id,omitempty"`
+	ResultType string  `json:"resultType"`
+}
+
+// MigrateMySqlAzureDbForMySqlOfflineTaskOutput returns the shared base-type
+// view of this variant.
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl {
+	return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{
+		Id:         s.Id,
+		ResultType: s.ResultType,
+	}
+}
+
+var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{}
+
+// MarshalJSON marshals through a local alias type (avoiding recursion into
+// this method), then forces resultType to the constant discriminator.
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) {
+	type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err)
+	}
+
+	decoded["resultType"] = "MigrationLevelOutput"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go
new file mode 100644
index 00000000000..518ea057737
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go
@@ -0,0 +1,61 @@
+package standardoperation
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{}
+
+// MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel is the per-table
+// progress variant; its resultType discriminator is "TableLevelOutput"
+// (injected by MarshalJSON).
+type MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel struct {
+	EndedOn             *string         `json:"endedOn,omitempty"`
+	ErrorPrefix         *string         `json:"errorPrefix,omitempty"`
+	ItemsCompletedCount *int64          `json:"itemsCompletedCount,omitempty"`
+	ItemsCount          *int64          `json:"itemsCount,omitempty"`
+	LastStorageUpdate   *string         `json:"lastStorageUpdate,omitempty"`
+	ObjectName          *string         `json:"objectName,omitempty"`
+	ResultPrefix        *string         `json:"resultPrefix,omitempty"`
+	StartedOn           *string         `json:"startedOn,omitempty"`
+	State               *MigrationState `json:"state,omitempty"`
+	StatusMessage       *string         `json:"statusMessage,omitempty"`
+
+	// Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput
+
+	Id         *string `json:"id,omitempty"`
+	ResultType string  `json:"resultType"`
+}
+
+// MigrateMySqlAzureDbForMySqlOfflineTaskOutput returns the shared base-type
+// view of this variant.
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl {
+	return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{
+		Id:         s.Id,
+		ResultType: s.ResultType,
+	}
+}
+
+var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{}
+
+// MarshalJSON marshals through a local alias type (avoiding recursion into
+// this method), then forces resultType to the constant discriminator.
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MarshalJSON() ([]byte, error) {
+	type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err)
+	}
+
+	decoded["resultType"] = "TableLevelOutput"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err)
+	}
+
+	return encoded, nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskproperties.go
new file mode 100644
index 00000000000..21d0f272c58
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlofflinetaskproperties.go
@@ -0,0 +1,127 @@
+package standardoperation
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{}
+
+// MigrateMySqlAzureDbForMySqlOfflineTaskProperties is the
+// ProjectTaskProperties variant whose taskType discriminator is
+// "Migrate.MySql.AzureDbForMySql" (injected by MarshalJSON). Output elements
+// are interface-typed, so UnmarshalJSON decodes them polymorphically.
+type MigrateMySqlAzureDbForMySqlOfflineTaskProperties struct {
+	Input       *MigrateMySqlAzureDbForMySqlOfflineTaskInput    `json:"input,omitempty"`
+	IsCloneable *bool                                           `json:"isCloneable,omitempty"`
+	Output      *[]MigrateMySqlAzureDbForMySqlOfflineTaskOutput `json:"output,omitempty"`
+	TaskId      *string                                         `json:"taskId,omitempty"`
+
+	// Fields inherited from ProjectTaskProperties
+
+	ClientData *map[string]string   `json:"clientData,omitempty"`
+	Commands   *[]CommandProperties `json:"commands,omitempty"`
+	Errors     *[]ODataError        `json:"errors,omitempty"`
+	State      *TaskState           `json:"state,omitempty"`
+	TaskType   TaskType             `json:"taskType"`
+}
+
+// ProjectTaskProperties returns the shared base-type view of this variant.
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
+	return BaseProjectTaskPropertiesImpl{
+		ClientData: s.ClientData,
+		Commands:   s.Commands,
+		Errors:     s.Errors,
+		State:      s.State,
+		TaskType:   s.TaskType,
+	}
+}
+
+var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{}
+
+// MarshalJSON marshals through a local alias type (avoiding recursion into
+// this method), then forces taskType to the constant discriminator.
+func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) MarshalJSON() ([]byte, error) {
+	type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskProperties
+	wrapped := wrapper(s)
+	encoded, err := json.Marshal(wrapped)
+	if err != nil {
+		return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err)
+	}
+
+	var decoded map[string]interface{}
+	if err = json.Unmarshal(encoded, &decoded); err != nil {
+		return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err)
+	}
+
+	decoded["taskType"] = "Migrate.MySql.AzureDbForMySql"
+
+	encoded, err = json.Marshal(decoded)
+	if err != nil {
+		return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err)
+	}
+
+	return encoded, nil
+}
+
+var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlOfflineTaskProperties{}
+
+// UnmarshalJSON decodes the plainly-typed fields directly, then re-reads the
+// raw payload to polymorphically unmarshal each element of "commands" and
+// "output" (both interface-typed slices) via their own discriminators.
+func (s *MigrateMySqlAzureDbForMySqlOfflineTaskProperties) UnmarshalJSON(bytes []byte) error {
+	var decoded struct {
+		Input       *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"`
+		IsCloneable *bool                                        `json:"isCloneable,omitempty"`
+		TaskId      *string                                      `json:"taskId,omitempty"`
+		ClientData  *map[string]string                           `json:"clientData,omitempty"`
+		Errors      *[]ODataError                                `json:"errors,omitempty"`
+		State       *TaskState                                   `json:"state,omitempty"`
+		TaskType    TaskType                                     `json:"taskType"`
+	}
+	if err := json.Unmarshal(bytes, &decoded); err != nil {
+		return fmt.Errorf("unmarshaling: %+v", err)
+	}
+
+	s.Input = decoded.Input
+	s.IsCloneable = decoded.IsCloneable
+	s.TaskId = decoded.TaskId
+	s.ClientData = decoded.ClientData
+	s.Errors = decoded.Errors
+	s.State = decoded.State
+	s.TaskType = decoded.TaskType
+
+	var temp map[string]json.RawMessage
+	if err := json.Unmarshal(bytes, &temp); err != nil {
+		return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties into map[string]json.RawMessage: %+v", err)
+	}
+
+	if v, ok := temp["commands"]; ok {
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]CommandProperties, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalCommandPropertiesImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Commands = &output
+	}
+
+	if v, ok := temp["output"]; ok {
+		var listTemp []json.RawMessage
+		if err := json.Unmarshal(v, &listTemp); err != nil {
+			return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err)
+		}
+
+		output := make([]MigrateMySqlAzureDbForMySqlOfflineTaskOutput, 0)
+		for i, val := range listTemp {
+			impl, err := UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(val)
+			if err != nil {
+				return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err)
+			}
+			output = append(output, impl)
+		}
+		s.Output = &output
+	}
+
+	return nil
+}
diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsyncdatabaseinput.go
new file mode 100644
index 00000000000..89ba9639692
--- /dev/null
+++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsyncdatabaseinput.go
@@ -0,0 +1,13 @@
+package standardoperation
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +type MigrateMySqlAzureDbForMySqlSyncDatabaseInput struct { + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskinput.go new file mode 100644 index 00000000000..04d7d9d06f6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskinput.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlSyncTaskInput struct { + SelectedDatabases []MigrateMySqlAzureDbForMySqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutput.go new file mode 100644 index 00000000000..46fca83f276 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlSyncTaskOutput interface { + MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlSyncTaskOutput BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlSyncTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err 
!= nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + migrateMySqlAzureDbForMySqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..476b4b4ee03 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..2db184f4232 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = 
MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputerror.go new file mode 100644 index 00000000000..6cb609cd686 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..1c9471161dc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ +package standardoperation + +import ( + 
"encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: 
%+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..320aa2815a0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *string `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *string `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *string `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() 
BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskproperties.go new file mode 100644 index 00000000000..5a74ccbc766 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratemysqlazuredbformysqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +type MigrateMySqlAzureDbForMySqlSyncTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput 
`json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..77e63f2651f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type MigrateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling 
MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", 
i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateOracleAzureDbPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..7180e76a2f1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateOracleAzureDbPostgreSqlSyncDatabaseInput struct { + CaseManipulation *string `json:"caseManipulation,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..f1759d2f3e9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskinput.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateOracleAzureDbPostgreSqlSyncTaskInput struct { + SelectedDatabases []MigrateOracleAzureDbPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..e0aba453ca6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutput interface { + MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +type BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +// RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for 
Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + migrateOracleAzureDbPostgreSqlSyncTaskOutput BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s.migrateOracleAzureDbPostgreSqlSyncTaskOutput +} + +func UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(input []byte) (MigrateOracleAzureDbPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") 
{ + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + migrateOracleAzureDbPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..5d38a774188 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..cbf1999525e --- 
/dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..ceb17ecda47 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..d26b704f874 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 
+1,57 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return 
nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..90fedca9be2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..37abd756ea1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go @@ -0,0 +1,14 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]interface{} `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SelectedTables *[]MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput `json:"selectedTables,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go new file mode 100644 index 00000000000..27f5595287f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go @@ -0,0 +1,8 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput struct { + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..678ab382710 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,30 @@ +package standardoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..b36ce5bc91d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput interface { + MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +type BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +// RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s.migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput +} + +func UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(input []byte) (MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if 
strings.EqualFold(value, "MigrationLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..ce0145c614d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..dd4305e7831 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) 
MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..7f44a146217 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go @@ -0,0 +1,53 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 
00000000000..e3e042fab1b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel struct { + DatabaseCount *float64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerType *ScenarioSource `json:"sourceServerType,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *ReplicateMigrationState `json:"state,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerType *ScenarioTarget `json:"targetServerType,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + 
wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..8d525891f48 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..3ca7ec10031 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,130 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, 
nil +} + +var _ json.Unmarshaler = &MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list 
[]json.RawMessage: %+v", err) + } + + output := make([]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbdatabaseinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbdatabaseinput.go new file mode 100644 index 00000000000..d95bd03b19a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbdatabaseinput.go @@ -0,0 +1,13 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbDatabaseInput struct { + Id *string `json:"id,omitempty"` + MakeSourceDbReadOnly *bool `json:"makeSourceDbReadOnly,omitempty"` + Name *string `json:"name,omitempty"` + SchemaSetting *interface{} `json:"schemaSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsyncdatabaseinput.go new file mode 100644 index 00000000000..94797970740 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskinput.go new file mode 100644 index 00000000000..0418d7a838c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskinput.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbSyncTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutput.go new file mode 100644 index 00000000000..f6fd5eec779 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutput.go @@ -0,0 +1,108 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncTaskOutput interface { + MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = RawMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + migrateSqlServerSqlDbSyncTaskOutput BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s.migrateSqlServerSqlDbSyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbSyncTaskOutputImpl{ + migrateSqlServerSqlDbSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..c10d21c2fcb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..56dca00efaf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go @@ -0,0 
+1,66 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) 
+ encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputerror.go new file mode 100644 index 00000000000..b8db98e2abc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputError{} + +type MigrateSqlServerSqlDbSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..b0b8da102e6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go @@ -0,0 +1,58 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..19db8816be5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: 
s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..78eae9793e1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbsynctaskproperties.go @@ -0,0 +1,121 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateSqlServerSqlDbSyncTaskProperties{} + +type MigrateSqlServerSqlDbSyncTaskProperties struct { + Input *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlDbSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskProperties{} + +func (s MigrateSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbSyncTaskProperties{} + +func (s *MigrateSqlServerSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskinput.go new file mode 100644 
index 00000000000..bfecd140bce --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskinput.go @@ -0,0 +1,13 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigrateSqlServerSqlDbDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutput.go new file mode 100644 index 00000000000..cdfb0028704 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutput.go @@ -0,0 +1,116 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbTaskOutput interface { + MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbTaskOutput = BaseMigrateSqlServerSqlDbTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbTaskOutput = RawMigrateSqlServerSqlDbTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlDbTaskOutputImpl struct { + migrateSqlServerSqlDbTaskOutput BaseMigrateSqlServerSqlDbTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s.migrateSqlServerSqlDbTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "MigrationDatabaseLevelValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbTaskOutputImpl{ + migrateSqlServerSqlDbTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputdatabaselevel.go new file mode 100644 index 00000000000..42db2b80a36 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputdatabaselevel.go @@ -0,0 +1,65 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +func (s 
MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go new file mode 100644 index 00000000000..53eb35d35dd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go @@ -0,0 +1,60 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult struct { + DataIntegrityValidationResult *DataIntegrityValidationResult `json:"dataIntegrityValidationResult,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + QueryAnalysisValidationResult *QueryAnalysisValidationResult `json:"queryAnalysisValidationResult,omitempty"` + SchemaValidationResult *SchemaComparisonValidationResult `json:"schemaValidationResult,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationDatabaseLevelValidationOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputerror.go new file mode 100644 index 00000000000..76c07cc532e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputerror.go @@ -0,0 +1,52 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputError{} + +type MigrateSqlServerSqlDbTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputError) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputError{} + +func (s MigrateSqlServerSqlDbTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + 
encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputmigrationlevel.go new file mode 100644 index 00000000000..44e76acb8a0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + MigrationValidationResult *MigrationValidationResult `json:"migrationValidationResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string 
`json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputtablelevel.go new file mode 100644 index 00000000000..e3d145f485c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputtablelevel.go @@ -0,0 +1,60 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputvalidationresult.go 
b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputvalidationresult.go new file mode 100644 index 00000000000..408dd58ff72 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskoutputvalidationresult.go @@ -0,0 +1,54 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputValidationResult struct { + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationValidationOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskproperties.go new file mode 100644 index 00000000000..be4e4b840d1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqldbtaskproperties.go @@ -0,0 +1,130 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlDbTaskProperties{} + +type MigrateSqlServerSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlDbTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskProperties{} + +func (s MigrateSqlServerSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskProperties + wrapped 
:= wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbTaskProperties{} + +func (s *MigrateSqlServerSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, 
val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmidatabaseinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmidatabaseinput.go new file mode 100644 index 00000000000..23b0d2b56c2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmidatabaseinput.go @@ -0,0 +1,12 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMIDatabaseInput struct { + BackupFilePaths *[]string `json:"backupFilePaths,omitempty"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + Id *string `json:"id,omitempty"` + Name string `json:"name"` + RestoreDatabaseName string `json:"restoreDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..e09ece06cee --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskinput.go @@ -0,0 +1,14 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + NumberOfParallelDatabaseMigrations *float64 `json:"numberOfParallelDatabaseMigrations,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..a362f46d57c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskoutput.go @@ -0,0 +1,92 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskOutput interface { + MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = BaseMigrateSqlServerSqlMISyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlMISyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = RawMigrateSqlServerSqlMISyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlMISyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlMISyncTaskOutputImpl struct { + migrateSqlServerSqlMISyncTaskOutput BaseMigrateSqlServerSqlMISyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s.migrateSqlServerSqlMISyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlMISyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMISyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMISyncTaskOutputImpl: %+v", err) + } + + return 
RawMigrateSqlServerSqlMISyncTaskOutputImpl{ + migrateSqlServerSqlMISyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..a90b2b76d96 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go @@ -0,0 +1,62 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel struct { + ActiveBackupSets *[]BackupSetInfo `json:"activeBackupSets,omitempty"` + ContainerName *string `json:"containerName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FullBackupSetInfo *BackupSetInfo `json:"fullBackupSetInfo,omitempty"` + IsFullBackupRestored *bool `json:"isFullBackupRestored,omitempty"` + LastRestoredBackupSetInfo *BackupSetInfo `json:"lastRestoredBackupSetInfo,omitempty"` + MigrationState *DatabaseMigrationState `json:"migrationState,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MigrateSqlServerSqlMISyncTaskOutput() 
BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskoutputerror.go new file mode 100644 index 00000000000..396df0a9bf5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskoutputerror.go @@ -0,0 +1,52 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputError{} + +type MigrateSqlServerSqlMISyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputError{} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..b63529be27d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go @@ -0,0 +1,62 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + DatabaseErrorCount *int64 `json:"databaseErrorCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerName *string `json:"sourceServerName,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerName *string `json:"targetServerName,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = 
"MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..77c96eb632c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmisynctaskproperties.go @@ -0,0 +1,124 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlMISyncTaskProperties{} + +type MigrateSqlServerSqlMISyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskProperties{} + +func (s MigrateSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskProperties + wrapped := 
wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMISyncTaskProperties{} + +func (s *MigrateSqlServerSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return 
fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMISyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskinput.go new file mode 100644 index 00000000000..c75cad1b3a0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskinput.go @@ -0,0 +1,18 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMITaskInput struct { + AadDomainName *string `json:"aadDomainName,omitempty"` + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedAgentJobs *[]string `json:"selectedAgentJobs,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..275cc7947e1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutput.go @@ -0,0 +1,108 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMITaskOutput interface { + MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl +} + +var _ MigrateSqlServerSqlMITaskOutput = BaseMigrateSqlServerSqlMITaskOutputImpl{} + +type BaseMigrateSqlServerSqlMITaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMITaskOutput = RawMigrateSqlServerSqlMITaskOutputImpl{} + +// RawMigrateSqlServerSqlMITaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlMITaskOutputImpl struct { + migrateSqlServerSqlMITaskOutput BaseMigrateSqlServerSqlMITaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s.migrateSqlServerSqlMITaskOutput +} + +func UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(input []byte) (MigrateSqlServerSqlMITaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMITaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMITaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMITaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlMITaskOutputImpl{ + migrateSqlServerSqlMITaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputagentjoblevel.go new file mode 100644 index 00000000000..f6d38bbed9c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package standardoperation + +import ( + "encoding/json" + 
"fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +type MigrateSqlServerSqlMITaskOutputAgentJobLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + Message *string `json:"message,omitempty"` + Name *string `json:"name,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputdatabaselevel.go new file mode 100644 index 00000000000..9cc346b1f32 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputdatabaselevel.go @@ -0,0 +1,59 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMITaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling 
MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputerror.go new file mode 100644 index 00000000000..3f421dc6a61 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputerror.go @@ -0,0 +1,52 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputError{} + +type MigrateSqlServerSqlMITaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputError) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputError{} + +func (s MigrateSqlServerSqlMITaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputloginlevel.go new file mode 100644 index 00000000000..aa37a182f75 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputloginlevel.go @@ -0,0 +1,58 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +type MigrateSqlServerSqlMITaskOutputLoginLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LoginName *string `json:"loginName,omitempty"` + Message *string `json:"message,omitempty"` + Stage *LoginMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputLoginLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputmigrationlevel.go new file mode 100644 index 
00000000000..aade3634a05 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMITaskOutputMigrationLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + Message *string `json:"message,omitempty"` + OrphanedUsersInfo *[]OrphanedUserInfo `json:"orphanedUsersInfo,omitempty"` + ServerRoleResults *map[string]StartMigrationScenarioServerRoleResult `json:"serverRoleResults,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +func (s 
MigrateSqlServerSqlMITaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..dc251345666 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesqlserversqlmitaskproperties.go @@ -0,0 +1,133 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateSqlServerSqlMITaskProperties{} + +type MigrateSqlServerSqlMITaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlMITaskOutput `json:"output,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskProperties{} + +func (s MigrateSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMITaskProperties{} + +func (s *MigrateSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + 
CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.ParentTaskId = decoded.ParentTaskId + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMITaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(val) + 
if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskinput.go new file mode 100644 index 00000000000..cbccefd55aa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskinput.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSsisTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + SsisMigrationInfo SsisMigrationInfo `json:"ssisMigrationInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskoutput.go new file mode 100644 index 00000000000..ff998744882 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskoutput.go @@ -0,0 +1,84 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSsisTaskOutput interface { + MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl +} + +var _ MigrateSsisTaskOutput = BaseMigrateSsisTaskOutputImpl{} + +type BaseMigrateSsisTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return s +} + +var _ MigrateSsisTaskOutput = RawMigrateSsisTaskOutputImpl{} + +// RawMigrateSsisTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSsisTaskOutputImpl struct { + migrateSsisTaskOutput BaseMigrateSsisTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return s.migrateSsisTaskOutput +} + +func UnmarshalMigrateSsisTaskOutputImplementation(input []byte) (MigrateSsisTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSsisTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SsisProjectLevelOutput") { + var out MigrateSsisTaskOutputProjectLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputProjectLevel: 
%+v", err) + } + return out, nil + } + + var parent BaseMigrateSsisTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSsisTaskOutputImpl: %+v", err) + } + + return RawMigrateSsisTaskOutputImpl{ + migrateSsisTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskoutputmigrationlevel.go new file mode 100644 index 00000000000..7467dbbe29d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputMigrationLevel{} + +type MigrateSsisTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputMigrationLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return 
BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputMigrationLevel{} + +func (s MigrateSsisTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskoutputprojectlevel.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskoutputprojectlevel.go new file mode 100644 index 00000000000..34f1abf1652 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskoutputprojectlevel.go @@ -0,0 +1,59 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputProjectLevel{} + +type MigrateSsisTaskOutputProjectLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FolderName *string `json:"folderName,omitempty"` + Message *string `json:"message,omitempty"` + ProjectName *string `json:"projectName,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputProjectLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputProjectLevel{} + +func (s MigrateSsisTaskOutputProjectLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputProjectLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + decoded["resultType"] = "SsisProjectLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskproperties.go new file mode 100644 index 00000000000..93f38c28564 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratessistaskproperties.go @@ -0,0 +1,121 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSsisTaskProperties{} + +type MigrateSsisTaskProperties struct { + Input *MigrateSsisTaskInput `json:"input,omitempty"` + Output *[]MigrateSsisTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSsisTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSsisTaskProperties{} + +func (s MigrateSsisTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Ssis" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSsisTaskProperties{} + +func (s *MigrateSsisTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + 
Input *MigrateSsisTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSsisTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSsisTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSsisTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesynccompletecommandinput.go 
b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesynccompletecommandinput.go new file mode 100644 index 00000000000..42a48ac77be --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesynccompletecommandinput.go @@ -0,0 +1,27 @@ +package standardoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSyncCompleteCommandInput struct { + CommitTimeStamp *string `json:"commitTimeStamp,omitempty"` + DatabaseName string `json:"databaseName"` +} + +func (o *MigrateSyncCompleteCommandInput) GetCommitTimeStampAsTime() (*time.Time, error) { + if o.CommitTimeStamp == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CommitTimeStamp, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateSyncCompleteCommandInput) SetCommitTimeStampAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CommitTimeStamp = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesynccompletecommandoutput.go new file mode 100644 index 00000000000..ea32e5edc5e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesynccompletecommandoutput.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` + Id *string `json:"id,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesynccompletecommandproperties.go new file mode 100644 index 00000000000..c1984c74ba3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migratesynccompletecommandproperties.go @@ -0,0 +1,56 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ CommandProperties = MigrateSyncCompleteCommandProperties{} + +type MigrateSyncCompleteCommandProperties struct { + CommandId *string `json:"commandId,omitempty"` + Input *MigrateSyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateSyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateSyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateSyncCompleteCommandProperties{} + +func (s MigrateSyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, 
fmt.Errorf("unmarshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.Sync.Complete.Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationeligibilityinfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationeligibilityinfo.go new file mode 100644 index 00000000000..b3de2426c3c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationeligibilityinfo.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationEligibilityInfo struct { + IsEligibleForMigration *bool `json:"isEligibleForMigration,omitempty"` + ValidationMessages *[]string `json:"validationMessages,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationreportresult.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationreportresult.go new file mode 100644 index 00000000000..98300b226f7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationreportresult.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationReportResult struct { + Id *string `json:"id,omitempty"` + ReportURL *string `json:"reportUrl,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationvalidationdatabasesummaryresult.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationvalidationdatabasesummaryresult.go new file mode 100644 index 00000000000..404bdf9be50 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationvalidationdatabasesummaryresult.go @@ -0,0 +1,44 @@ +package standardoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationValidationDatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} + +func (o *MigrationValidationDatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *MigrationValidationDatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := 
input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationvalidationoptions.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationvalidationoptions.go new file mode 100644 index 00000000000..b125e7c847d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationvalidationoptions.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrationValidationOptions struct { + EnableDataIntegrityValidation *bool `json:"enableDataIntegrityValidation,omitempty"` + EnableQueryAnalysisValidation *bool `json:"enableQueryAnalysisValidation,omitempty"` + EnableSchemaValidation *bool `json:"enableSchemaValidation,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationvalidationresult.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationvalidationresult.go new file mode 100644 index 00000000000..ed478b25729 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_migrationvalidationresult.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrationValidationResult struct { + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_misqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_misqlconnectioninfo.go new file mode 100644 index 00000000000..8f85520417e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_misqlconnectioninfo.go @@ -0,0 +1,54 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MiSqlConnectionInfo{} + +type MiSqlConnectionInfo struct { + ManagedInstanceResourceId string `json:"managedInstanceResourceId"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MiSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MiSqlConnectionInfo{} + +func (s MiSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MiSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MiSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MiSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MiSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, 
fmt.Errorf("re-marshaling MiSqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbclusterinfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbclusterinfo.go new file mode 100644 index 00000000000..5e6b9830dd5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbclusterinfo.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbClusterInfo struct { + Databases []MongoDbDatabaseInfo `json:"databases"` + SupportsSharding bool `json:"supportsSharding"` + Type MongoDbClusterType `json:"type"` + Version string `json:"version"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbcollectioninfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbcollectioninfo.go new file mode 100644 index 00000000000..46d8be9acb6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbcollectioninfo.go @@ -0,0 +1,19 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + DataSize int64 `json:"dataSize"` + DatabaseName string `json:"databaseName"` + DocumentCount int64 `json:"documentCount"` + IsCapped bool `json:"isCapped"` + IsSystemCollection bool `json:"isSystemCollection"` + IsView bool `json:"isView"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + ShardKey *MongoDbShardKeyInfo `json:"shardKey,omitempty"` + SupportsSharding bool `json:"supportsSharding"` + ViewOf *string `json:"viewOf,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbcollectionprogress.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbcollectionprogress.go new file mode 100644 index 00000000000..f6b96b3d808 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbcollectionprogress.go @@ -0,0 +1,102 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbCollectionProgress{} + +type MongoDbCollectionProgress struct { + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbCollectionProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbCollectionProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbCollectionProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) 
SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbCollectionProgress{} + +func (s MongoDbCollectionProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbCollectionProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbCollectionProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbCollectionProgress: %+v", err) + } + + decoded["resultType"] = "Collection" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbCollectionProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbcollectionsettings.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbcollectionsettings.go new file mode 100644 index 00000000000..d605e737340 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbcollectionsettings.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionSettings struct { + CanDelete *bool `json:"canDelete,omitempty"` + ShardKey *MongoDbShardKeySetting `json:"shardKey,omitempty"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbconnectioninfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbconnectioninfo.go new file mode 100644 index 00000000000..9f647ecb19e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbconnectioninfo.go @@ -0,0 +1,64 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MongoDbConnectionInfo{} + +type MongoDbConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + ConnectionString string `json:"connectionString"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + EnforceSSL *bool `json:"enforceSSL,omitempty"` + Port *int64 `json:"port,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MongoDbConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MongoDbConnectionInfo{} + +func (s MongoDbConnectionInfo) MarshalJSON() ([]byte, 
error) { + type wrapper MongoDbConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbConnectionInfo: %+v", err) + } + + decoded["type"] = "MongoDbConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbdatabaseinfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbdatabaseinfo.go new file mode 100644 index 00000000000..282f3c78507 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbdatabaseinfo.go @@ -0,0 +1,14 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbDatabaseInfo struct { + AverageDocumentSize int64 `json:"averageDocumentSize"` + Collections []MongoDbCollectionInfo `json:"collections"` + DataSize int64 `json:"dataSize"` + DocumentCount int64 `json:"documentCount"` + Name string `json:"name"` + QualifiedName string `json:"qualifiedName"` + SupportsSharding bool `json:"supportsSharding"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbdatabaseprogress.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbdatabaseprogress.go new file mode 100644 index 00000000000..981c255c476 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbdatabaseprogress.go @@ -0,0 +1,166 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbDatabaseProgress{} + +type MongoDbDatabaseProgress struct { + Collections *map[string]MongoDbProgress `json:"collections,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbDatabaseProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbDatabaseProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbDatabaseProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbDatabaseProgress{} + +func (s MongoDbDatabaseProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbDatabaseProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbDatabaseProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbDatabaseProgress: %+v", err) + } + + decoded["resultType"] = "Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbDatabaseProgress: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MongoDbDatabaseProgress{} + +func (s *MongoDbDatabaseProgress) UnmarshalJSON(bytes []byte) error { + var decoded struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.BytesCopied = decoded.BytesCopied + s.DocumentsCopied = decoded.DocumentsCopied + s.ElapsedTime = decoded.ElapsedTime + s.Errors = decoded.Errors + s.EventsPending = 
decoded.EventsPending + s.EventsReplayed = decoded.EventsReplayed + s.LastEventTime = decoded.LastEventTime + s.LastReplayTime = decoded.LastReplayTime + s.Name = decoded.Name + s.QualifiedName = decoded.QualifiedName + s.ResultType = decoded.ResultType + s.State = decoded.State + s.TotalBytes = decoded.TotalBytes + s.TotalDocuments = decoded.TotalDocuments + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MongoDbDatabaseProgress into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["collections"]; ok { + var dictionaryTemp map[string]json.RawMessage + if err := json.Unmarshal(v, &dictionaryTemp); err != nil { + return fmt.Errorf("unmarshaling Collections into dictionary map[string]json.RawMessage: %+v", err) + } + + output := make(map[string]MongoDbProgress) + for key, val := range dictionaryTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling key %q field 'Collections' for 'MongoDbDatabaseProgress': %+v", key, err) + } + output[key] = impl + } + s.Collections = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbdatabasesettings.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbdatabasesettings.go new file mode 100644 index 00000000000..17cdd437e92 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbdatabasesettings.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbDatabaseSettings struct { + Collections map[string]MongoDbCollectionSettings `json:"collections"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodberror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodberror.go new file mode 100644 index 00000000000..1842a1847dd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodberror.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbError struct { + Code *string `json:"code,omitempty"` + Count *int64 `json:"count,omitempty"` + Message *string `json:"message,omitempty"` + Type *MongoDbErrorType `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbmigrationprogress.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbmigrationprogress.go new file mode 100644 index 00000000000..6c6ec6e7a4e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbmigrationprogress.go @@ -0,0 +1,103 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MongoDbProgress = MongoDbMigrationProgress{} + +type MongoDbMigrationProgress struct { + Databases *map[string]MongoDbDatabaseProgress `json:"databases,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbMigrationProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbMigrationProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbMigrationProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbMigrationProgress{} + +func (s MongoDbMigrationProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbMigrationProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbMigrationProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbMigrationProgress: %+v", err) + } + + decoded["resultType"] = "Migration" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbMigrationProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbmigrationsettings.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbmigrationsettings.go new file mode 100644 index 00000000000..6693a25fb28 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbmigrationsettings.go @@ -0,0 +1,13 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbMigrationSettings struct { + BoostRUs *int64 `json:"boostRUs,omitempty"` + Databases map[string]MongoDbDatabaseSettings `json:"databases"` + Replication *MongoDbReplication `json:"replication,omitempty"` + Source MongoDbConnectionInfo `json:"source"` + Target MongoDbConnectionInfo `json:"target"` + Throttling *MongoDbThrottlingSettings `json:"throttling,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbprogress.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbprogress.go new file mode 100644 index 00000000000..efa9cd92a74 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbprogress.go @@ -0,0 +1,104 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbProgress interface { + MongoDbProgress() BaseMongoDbProgressImpl +} + +var _ MongoDbProgress = BaseMongoDbProgressImpl{} + +type BaseMongoDbProgressImpl struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s BaseMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s +} + +var _ MongoDbProgress = RawMongoDbProgressImpl{} + +// 
RawMongoDbProgressImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMongoDbProgressImpl struct { + mongoDbProgress BaseMongoDbProgressImpl + Type string + Values map[string]interface{} +} + +func (s RawMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s.mongoDbProgress +} + +func UnmarshalMongoDbProgressImplementation(input []byte) (MongoDbProgress, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbProgress into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Collection") { + var out MongoDbCollectionProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbCollectionProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Database") { + var out MongoDbDatabaseProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbDatabaseProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migration") { + var out MongoDbMigrationProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbMigrationProgress: %+v", err) + } + return out, nil + } + + var parent BaseMongoDbProgressImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMongoDbProgressImpl: %+v", err) + } + + return RawMongoDbProgressImpl{ + mongoDbProgress: parent, + Type: value, + Values: temp, + }, nil + +} diff --git 
a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbshardkeyfield.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbshardkeyfield.go new file mode 100644 index 00000000000..1dad735fa92 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbshardkeyfield.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbShardKeyField struct { + Name string `json:"name"` + Order MongoDbShardKeyOrder `json:"order"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbshardkeyinfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbshardkeyinfo.go new file mode 100644 index 00000000000..e787439e186 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbshardkeyinfo.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbShardKeyInfo struct { + Fields []MongoDbShardKeyField `json:"fields"` + IsUnique bool `json:"isUnique"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbshardkeysetting.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbshardkeysetting.go new file mode 100644 index 00000000000..0669af3c30a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbshardkeysetting.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbShardKeySetting struct { + Fields []MongoDbShardKeyField `json:"fields"` + IsUnique *bool `json:"isUnique,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbthrottlingsettings.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbthrottlingsettings.go new file mode 100644 index 00000000000..53eb9f20987 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mongodbthrottlingsettings.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbThrottlingSettings struct { + MaxParallelism *int64 `json:"maxParallelism,omitempty"` + MinFreeCPU *int64 `json:"minFreeCpu,omitempty"` + MinFreeMemoryMb *int64 `json:"minFreeMemoryMb,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_mysqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_mysqlconnectioninfo.go new file mode 100644 index 00000000000..16a53f3a63a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_mysqlconnectioninfo.go @@ -0,0 +1,59 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = MySqlConnectionInfo{} + +type MySqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Port int64 `json:"port"` + ServerName string `json:"serverName"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MySqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MySqlConnectionInfo{} + +func (s MySqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MySqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MySqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MySqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MySqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MySqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_nameavailabilityrequest.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_nameavailabilityrequest.go new file mode 100644 index 00000000000..088132ee661 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_nameavailabilityrequest.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type NameAvailabilityRequest struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_nameavailabilityresponse.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_nameavailabilityresponse.go new file mode 100644 index 00000000000..5e6b1cb6c85 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_nameavailabilityresponse.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type NameAvailabilityResponse struct { + Message *string `json:"message,omitempty"` + NameAvailable *bool `json:"nameAvailable,omitempty"` + Reason *NameCheckFailureReason `json:"reason,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_odataerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_odataerror.go new file mode 100644 index 00000000000..a21c9dea098 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_odataerror.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ODataError struct { + Code *string `json:"code,omitempty"` + Details *[]ODataError `json:"details,omitempty"` + Message *string `json:"message,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_oracleconnectioninfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_oracleconnectioninfo.go new file mode 100644 index 00000000000..1f15e762aac --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_oracleconnectioninfo.go @@ -0,0 +1,58 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = OracleConnectionInfo{} + +type OracleConnectionInfo struct { + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource string `json:"dataSource"` + Port *int64 `json:"port,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s OracleConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = OracleConnectionInfo{} + +func (s OracleConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper OracleConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling OracleConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling OracleConnectionInfo: %+v", err) + } + + decoded["type"] = "OracleConnectionInfo" + + encoded, 
err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling OracleConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_orphaneduserinfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_orphaneduserinfo.go new file mode 100644 index 00000000000..48606cab2cd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_orphaneduserinfo.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type OrphanedUserInfo struct { + DatabaseName *string `json:"databaseName,omitempty"` + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_postgresqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_postgresqlconnectioninfo.go new file mode 100644 index 00000000000..1829aa70e08 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_postgresqlconnectioninfo.go @@ -0,0 +1,63 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectionInfo = PostgreSqlConnectionInfo{} + +type PostgreSqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Port int64 `json:"port"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName string `json:"serverName"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s PostgreSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = PostgreSqlConnectionInfo{} + +func (s PostgreSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper PostgreSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling PostgreSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling PostgreSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "PostgreSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling PostgreSqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_project.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_project.go new file mode 100644 index 00000000000..2f3f2e18944 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/standardoperation/model_project.go @@ -0,0 +1,19 @@ +package standardoperation + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type Project struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *ProjectProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_projectfile.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_projectfile.go new file mode 100644 index 00000000000..1f0c055eaf5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_projectfile.go @@ -0,0 +1,17 @@ +package standardoperation + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ProjectFile struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *ProjectFileProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_projectfileproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_projectfileproperties.go new file mode 100644 index 00000000000..190aff15fd1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_projectfileproperties.go @@ -0,0 +1,30 @@ +package standardoperation + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectFileProperties struct { + Extension *string `json:"extension,omitempty"` + FilePath *string `json:"filePath,omitempty"` + LastModified *string `json:"lastModified,omitempty"` + MediaType *string `json:"mediaType,omitempty"` + Size *int64 `json:"size,omitempty"` +} + +func (o *ProjectFileProperties) GetLastModifiedAsTime() (*time.Time, error) { + if o.LastModified == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastModified, "2006-01-02T15:04:05Z07:00") +} + +func (o *ProjectFileProperties) SetLastModifiedAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModified = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_projectproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_projectproperties.go new file mode 100644 index 00000000000..a7d398012ad --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_projectproperties.go @@ -0,0 +1,81 @@ +package 
standardoperation + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProjectProperties struct { + AzureAuthenticationInfo *AzureActiveDirectoryApp `json:"azureAuthenticationInfo,omitempty"` + CreationTime *string `json:"creationTime,omitempty"` + DatabasesInfo *[]DatabaseInfo `json:"databasesInfo,omitempty"` + ProvisioningState *ProjectProvisioningState `json:"provisioningState,omitempty"` + SourceConnectionInfo ConnectionInfo `json:"sourceConnectionInfo"` + SourcePlatform ProjectSourcePlatform `json:"sourcePlatform"` + TargetConnectionInfo ConnectionInfo `json:"targetConnectionInfo"` + TargetPlatform ProjectTargetPlatform `json:"targetPlatform"` +} + +func (o *ProjectProperties) GetCreationTimeAsTime() (*time.Time, error) { + if o.CreationTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CreationTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *ProjectProperties) SetCreationTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CreationTime = &formatted +} + +var _ json.Unmarshaler = &ProjectProperties{} + +func (s *ProjectProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + AzureAuthenticationInfo *AzureActiveDirectoryApp `json:"azureAuthenticationInfo,omitempty"` + CreationTime *string `json:"creationTime,omitempty"` + DatabasesInfo *[]DatabaseInfo `json:"databasesInfo,omitempty"` + ProvisioningState *ProjectProvisioningState `json:"provisioningState,omitempty"` + SourcePlatform ProjectSourcePlatform `json:"sourcePlatform"` + TargetPlatform ProjectTargetPlatform `json:"targetPlatform"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.AzureAuthenticationInfo = decoded.AzureAuthenticationInfo + 
s.CreationTime = decoded.CreationTime + s.DatabasesInfo = decoded.DatabasesInfo + s.ProvisioningState = decoded.ProvisioningState + s.SourcePlatform = decoded.SourcePlatform + s.TargetPlatform = decoded.TargetPlatform + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ProjectProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["sourceConnectionInfo"]; ok { + impl, err := UnmarshalConnectionInfoImplementation(v) + if err != nil { + return fmt.Errorf("unmarshaling field 'SourceConnectionInfo' for 'ProjectProperties': %+v", err) + } + s.SourceConnectionInfo = impl + } + + if v, ok := temp["targetConnectionInfo"]; ok { + impl, err := UnmarshalConnectionInfoImplementation(v) + if err != nil { + return fmt.Errorf("unmarshaling field 'TargetConnectionInfo' for 'ProjectProperties': %+v", err) + } + s.TargetConnectionInfo = impl + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_projecttask.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_projecttask.go new file mode 100644 index 00000000000..8fee608ca0c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_projecttask.go @@ -0,0 +1,56 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ProjectTask struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties ProjectTaskProperties `json:"properties"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} + +var _ json.Unmarshaler = &ProjectTask{} + +func (s *ProjectTask) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Etag = decoded.Etag + s.Id = decoded.Id + s.Name = decoded.Name + s.SystemData = decoded.SystemData + s.Type = decoded.Type + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ProjectTask into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["properties"]; ok { + impl, err := UnmarshalProjectTaskPropertiesImplementation(v) + if err != nil { + return fmt.Errorf("unmarshaling field 'Properties' for 'ProjectTask': %+v", err) + } + s.Properties = impl + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_projecttaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_projecttaskproperties.go new file mode 100644 index 00000000000..c26aa7cadc1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_projecttaskproperties.go @@ -0,0 +1,386 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ProjectTaskProperties interface { + ProjectTaskProperties() BaseProjectTaskPropertiesImpl +} + +var _ ProjectTaskProperties = BaseProjectTaskPropertiesImpl{} + +type BaseProjectTaskPropertiesImpl struct { + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s BaseProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return s +} + +var _ ProjectTaskProperties = RawProjectTaskPropertiesImpl{} + +// RawProjectTaskPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawProjectTaskPropertiesImpl struct { + projectTaskProperties BaseProjectTaskPropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return s.projectTaskProperties +} + +var _ json.Unmarshaler = &BaseProjectTaskPropertiesImpl{} + +func (s *BaseProjectTaskPropertiesImpl) UnmarshalJSON(bytes []byte) error { + var decoded struct { + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling BaseProjectTaskPropertiesImpl into 
map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'BaseProjectTaskPropertiesImpl': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} + +func UnmarshalProjectTaskPropertiesImplementation(input []byte) (ProjectTaskProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ProjectTaskProperties into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["taskType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Connect.MongoDb") { + var out ConnectToMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.MySql") { + var out ConnectToSourceMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.Oracle.Sync") { + var out ConnectToSourceOracleSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.PostgreSql.Sync") { + var out ConnectToSourcePostgreSqlSyncTaskProperties + if err := 
json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.SqlServer.Sync") { + var out ConnectToSourceSqlServerSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToSource.SqlServer") { + var out ConnectToSourceSqlServerTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureDbForMySql") { + var out ConnectToTargetAzureDbForMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureDbForPostgreSql.Sync") { + var out ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync") { + var out ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.SqlDb") { + var out ConnectToTargetSqlDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlDbTaskProperties: %+v", err) + } + 
return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI.Sync.LRS") { + var out ConnectToTargetSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI") { + var out ConnectToTargetSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ConnectToTarget.SqlDb.Sync") { + var out ConnectToTargetSqlSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetTDECertificates.Sql") { + var out GetTdeCertificatesSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetTdeCertificatesSqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesMySql") { + var out GetUserTablesMySqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesMySqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesOracle") { + var out GetUserTablesOracleTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesOracleTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTablesPostgreSql") { + var out GetUserTablesPostgreSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + return out, nil + } + + 
if strings.EqualFold(value, "GetUserTables.AzureSqlDb.Sync") { + var out GetUserTablesSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "GetUserTables.Sql") { + var out GetUserTablesSqlTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MongoDb") { + var out MigrateMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql.Sync") { + var out MigrateMySqlAzureDbForMySqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Oracle.AzureDbForPostgreSql.Sync") { + var out MigrateOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDb.Sync") { + var out MigrateSqlServerSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.SqlDb") { + var out MigrateSqlServerSqlDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS") { + var out MigrateSqlServerSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI") { + var out MigrateSqlServerSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Ssis") { + var out MigrateSsisTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.SqlDb.Sync") { + var out ValidateMigrationInputSqlServerSqlDbSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS") { + var out 
ValidateMigrationInputSqlServerSqlMISyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI") { + var out ValidateMigrationInputSqlServerSqlMITaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Validate.MongoDb") { + var out ValidateMongoDbTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateMongoDbTaskProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Validate.Oracle.AzureDbPostgreSql.Sync") { + var out ValidateOracleAzureDbForPostgreSqlSyncTaskProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + return out, nil + } + + var parent BaseProjectTaskPropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseProjectTaskPropertiesImpl: %+v", err) + } + + return RawProjectTaskPropertiesImpl{ + projectTaskProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_queryanalysisvalidationresult.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_queryanalysisvalidationresult.go new file mode 100644 index 00000000000..3abd2b09405 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_queryanalysisvalidationresult.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QueryAnalysisValidationResult struct { + QueryResults *QueryExecutionResult `json:"queryResults,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_queryexecutionresult.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_queryexecutionresult.go new file mode 100644 index 00000000000..dc73c5d4c79 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_queryexecutionresult.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QueryExecutionResult struct { + QueryText *string `json:"queryText,omitempty"` + SourceResult *ExecutionStatistics `json:"sourceResult,omitempty"` + StatementsInBatch *int64 `json:"statementsInBatch,omitempty"` + TargetResult *ExecutionStatistics `json:"targetResult,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_quota.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_quota.go new file mode 100644 index 00000000000..1a26e227c4c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_quota.go @@ -0,0 +1,12 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type Quota struct { + CurrentValue *float64 `json:"currentValue,omitempty"` + Id *string `json:"id,omitempty"` + Limit *float64 `json:"limit,omitempty"` + Name *QuotaName `json:"name,omitempty"` + Unit *string `json:"unit,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_quotaname.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_quotaname.go new file mode 100644 index 00000000000..81863a7fd3e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_quotaname.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QuotaName struct { + LocalizedValue *string `json:"localizedValue,omitempty"` + Value *string `json:"value,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_reportableexception.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_reportableexception.go new file mode 100644 index 00000000000..eb016d5302f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_reportableexception.go @@ -0,0 +1,13 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ReportableException struct { + ActionableMessage *string `json:"actionableMessage,omitempty"` + FilePath *string `json:"filePath,omitempty"` + HResult *int64 `json:"hResult,omitempty"` + LineNumber *string `json:"lineNumber,omitempty"` + Message *string `json:"message,omitempty"` + StackTrace *string `json:"stackTrace,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_resourcesku.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_resourcesku.go new file mode 100644 index 00000000000..7c6ce53f2d9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_resourcesku.go @@ -0,0 +1,19 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResourceSku struct { + ApiVersions *[]string `json:"apiVersions,omitempty"` + Capabilities *[]ResourceSkuCapabilities `json:"capabilities,omitempty"` + Capacity *ResourceSkuCapacity `json:"capacity,omitempty"` + Costs *[]ResourceSkuCosts `json:"costs,omitempty"` + Family *string `json:"family,omitempty"` + Kind *string `json:"kind,omitempty"` + Locations *[]string `json:"locations,omitempty"` + Name *string `json:"name,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` + Restrictions *[]ResourceSkuRestrictions `json:"restrictions,omitempty"` + Size *string `json:"size,omitempty"` + Tier *string `json:"tier,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_resourceskucapabilities.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_resourceskucapabilities.go new file mode 100644 index 00000000000..ce1045bd2c6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_resourceskucapabilities.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResourceSkuCapabilities struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_resourceskucapacity.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_resourceskucapacity.go new file mode 100644 index 00000000000..99e4d7488ec --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_resourceskucapacity.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResourceSkuCapacity struct { + Default *int64 `json:"default,omitempty"` + Maximum *int64 `json:"maximum,omitempty"` + Minimum *int64 `json:"minimum,omitempty"` + ScaleType *ResourceSkuCapacityScaleType `json:"scaleType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_resourceskucosts.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_resourceskucosts.go new file mode 100644 index 00000000000..c9cf9f34933 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_resourceskucosts.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ResourceSkuCosts struct { + ExtendedUnit *string `json:"extendedUnit,omitempty"` + MeterID *string `json:"meterID,omitempty"` + Quantity *int64 `json:"quantity,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_resourceskurestrictions.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_resourceskurestrictions.go new file mode 100644 index 00000000000..be50c139751 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_resourceskurestrictions.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResourceSkuRestrictions struct { + ReasonCode *ResourceSkuRestrictionsReasonCode `json:"reasonCode,omitempty"` + Type *ResourceSkuRestrictionsType `json:"type,omitempty"` + Values *[]string `json:"values,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_schemacomparisonvalidationresult.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_schemacomparisonvalidationresult.go new file mode 100644 index 00000000000..6014e8145d9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_schemacomparisonvalidationresult.go @@ -0,0 +1,11 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SchemaComparisonValidationResult struct { + SchemaDifferences *SchemaComparisonValidationResultType `json:"schemaDifferences,omitempty"` + SourceDatabaseObjectCount *map[string]int64 `json:"sourceDatabaseObjectCount,omitempty"` + TargetDatabaseObjectCount *map[string]int64 `json:"targetDatabaseObjectCount,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_schemacomparisonvalidationresulttype.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_schemacomparisonvalidationresulttype.go new file mode 100644 index 00000000000..e37d06e5bd9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_schemacomparisonvalidationresulttype.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SchemaComparisonValidationResultType struct { + ObjectName *string `json:"objectName,omitempty"` + ObjectType *ObjectType `json:"objectType,omitempty"` + UpdateAction *UpdateActionType `json:"updateAction,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_selectedcertificateinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_selectedcertificateinput.go new file mode 100644 index 00000000000..f90ab97ec64 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_selectedcertificateinput.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SelectedCertificateInput struct { + CertificateName string `json:"certificateName"` + Password string `json:"password"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_serverproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_serverproperties.go new file mode 100644 index 00000000000..d38f963efd7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_serverproperties.go @@ -0,0 +1,13 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ServerProperties struct { + ServerDatabaseCount *int64 `json:"serverDatabaseCount,omitempty"` + ServerEdition *string `json:"serverEdition,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerOperatingSystemVersion *string `json:"serverOperatingSystemVersion,omitempty"` + ServerPlatform *string `json:"serverPlatform,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_servicesku.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_servicesku.go new file mode 100644 index 00000000000..dc848b2941e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_servicesku.go @@ -0,0 +1,12 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ServiceSku struct { + Capacity *int64 `json:"capacity,omitempty"` + Family *string `json:"family,omitempty"` + Name *string `json:"name,omitempty"` + Size *string `json:"size,omitempty"` + Tier *string `json:"tier,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_sqlconnectioninfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_sqlconnectioninfo.go new file mode 100644 index 00000000000..5bfade16b95 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_sqlconnectioninfo.go @@ -0,0 +1,64 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = SqlConnectionInfo{} + +type SqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource string `json:"dataSource"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Platform *SqlSourcePlatform `json:"platform,omitempty"` + Port *int64 `json:"port,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s SqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = SqlConnectionInfo{} + +func (s SqlConnectionInfo) MarshalJSON() ([]byte, error) 
{ + type wrapper SqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling SqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling SqlConnectionInfo: %+v", err) + } + + decoded["type"] = "SqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling SqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_sqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_sqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..5d913e91c33 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_sqlserversqlmisynctaskinput.go @@ -0,0 +1,13 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_ssismigrationinfo.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_ssismigrationinfo.go new file mode 100644 index 00000000000..83326974c1d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_ssismigrationinfo.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SsisMigrationInfo struct { + EnvironmentOverwriteOption *SsisMigrationOverwriteOption `json:"environmentOverwriteOption,omitempty"` + ProjectOverwriteOption *SsisMigrationOverwriteOption `json:"projectOverwriteOption,omitempty"` + SsisStoreType *SsisStoreType `json:"ssisStoreType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_startmigrationscenarioserverroleresult.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_startmigrationscenarioserverroleresult.go new file mode 100644 index 00000000000..b3b1cf8f6b0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_startmigrationscenarioserverroleresult.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type StartMigrationScenarioServerRoleResult struct { + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Name *string `json:"name,omitempty"` + State *MigrationState `json:"state,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_syncmigrationdatabaseerrorevent.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_syncmigrationdatabaseerrorevent.go new file mode 100644 index 00000000000..5fe9e3ada53 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_syncmigrationdatabaseerrorevent.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SyncMigrationDatabaseErrorEvent struct { + EventText *string `json:"eventText,omitempty"` + EventTypeString *string `json:"eventTypeString,omitempty"` + TimestampString *string `json:"timestampString,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..acceed37fd0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlDbSyncTaskProperties struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..b940cc2b66b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package 
standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMISyncTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..0248ed64143 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmisynctaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlMISyncTaskProperties struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMISyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmitaskinput.go new file mode 100644 index 00000000000..ac451018e63 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmitaskinput.go @@ -0,0 +1,14 @@ +package standardoperation + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMITaskInput struct { + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..b53647f19b4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmitaskoutput.go @@ -0,0 +1,15 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateMigrationInputSqlServerSqlMITaskOutput struct { + BackupFolderErrors *[]ReportableException `json:"backupFolderErrors,omitempty"` + BackupShareCredentialsErrors *[]ReportableException `json:"backupShareCredentialsErrors,omitempty"` + BackupStorageAccountErrors *[]ReportableException `json:"backupStorageAccountErrors,omitempty"` + DatabaseBackupInfo *DatabaseBackupInfo `json:"databaseBackupInfo,omitempty"` + ExistingBackupErrors *[]ReportableException `json:"existingBackupErrors,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + RestoreDatabaseNameErrors *[]ReportableException `json:"restoreDatabaseNameErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..a19fd3f6206 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemigrationinputsqlserversqlmitaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +type ValidateMigrationInputSqlServerSqlMITaskProperties struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var 
decoded struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemongodbtaskproperties.go new file mode 100644 index 00000000000..0926021ed1e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatemongodbtaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ValidateMongoDbTaskProperties{} + +type ValidateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMongoDbTaskProperties{} + +func (s ValidateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMongoDbTaskProperties{} + +func (s *ValidateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + ClientData *map[string]string 
`json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_validateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_validateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..870c4f8d64e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_validateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package standardoperation + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ValidateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.Oracle.AzureDbPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { 
+ var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_validateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_validateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..ee924fe1d56 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_validateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,8 @@ +package standardoperation + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateOracleAzureDbPostgreSqlSyncTaskOutput struct { + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_validatesyncmigrationinputsqlservertaskinput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatesyncmigrationinputsqlservertaskinput.go new file mode 100644 index 00000000000..6b177cc0d5b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatesyncmigrationinputsqlservertaskinput.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateSyncMigrationInputSqlServerTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_validatesyncmigrationinputsqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatesyncmigrationinputsqlservertaskoutput.go new file mode 100644 index 00000000000..b32d25bf77e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_validatesyncmigrationinputsqlservertaskoutput.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateSyncMigrationInputSqlServerTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_validationerror.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_validationerror.go new file mode 100644 index 00000000000..416bdafc455 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_validationerror.go @@ -0,0 +1,9 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidationError struct { + Severity *Severity `json:"severity,omitempty"` + Text *string `json:"text,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/model_waitstatistics.go b/resource-manager/datamigration/2025-06-30/standardoperation/model_waitstatistics.go new file mode 100644 index 00000000000..764f04979d4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/model_waitstatistics.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type WaitStatistics struct { + WaitCount *int64 `json:"waitCount,omitempty"` + WaitTimeMs *float64 `json:"waitTimeMs,omitempty"` + WaitType *string `json:"waitType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/predicates.go b/resource-manager/datamigration/2025-06-30/standardoperation/predicates.go new file mode 100644 index 00000000000..a6de023e49d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/predicates.go @@ -0,0 +1,210 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AvailableServiceSkuOperationPredicate struct { + ResourceType *string +} + +func (p AvailableServiceSkuOperationPredicate) Matches(input AvailableServiceSku) bool { + + if p.ResourceType != nil && (input.ResourceType == nil || *p.ResourceType != *input.ResourceType) { + return false + } + + return true +} + +type DataMigrationServiceOperationPredicate struct { + Etag *string + Id *string + Kind *string + Location *string + Name *string + Type *string +} + +func (p DataMigrationServiceOperationPredicate) Matches(input DataMigrationService) bool { + + if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Kind != nil && (input.Kind == nil || *p.Kind != *input.Kind) { + return false + } + + if p.Location != nil && (input.Location == nil || *p.Location != *input.Location) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type ProjectOperationPredicate struct { + Etag *string + Id *string + Location *string + Name *string + Type *string +} + +func (p ProjectOperationPredicate) 
Matches(input Project) bool { + + if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Location != nil && (input.Location == nil || *p.Location != *input.Location) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type ProjectFileOperationPredicate struct { + Etag *string + Id *string + Name *string + Type *string +} + +func (p ProjectFileOperationPredicate) Matches(input ProjectFile) bool { + + if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type ProjectTaskOperationPredicate struct { + Etag *string + Id *string + Name *string + Type *string +} + +func (p ProjectTaskOperationPredicate) Matches(input ProjectTask) bool { + + if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type QuotaOperationPredicate struct { + CurrentValue *float64 + Id *string + Limit *float64 + Unit *string +} + +func (p QuotaOperationPredicate) Matches(input Quota) bool { + + if p.CurrentValue != nil && (input.CurrentValue == nil || *p.CurrentValue != *input.CurrentValue) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + 
return false + } + + if p.Limit != nil && (input.Limit == nil || *p.Limit != *input.Limit) { + return false + } + + if p.Unit != nil && (input.Unit == nil || *p.Unit != *input.Unit) { + return false + } + + return true +} + +type ResourceSkuOperationPredicate struct { + Family *string + Kind *string + Name *string + ResourceType *string + Size *string + Tier *string +} + +func (p ResourceSkuOperationPredicate) Matches(input ResourceSku) bool { + + if p.Family != nil && (input.Family == nil || *p.Family != *input.Family) { + return false + } + + if p.Kind != nil && (input.Kind == nil || *p.Kind != *input.Kind) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.ResourceType != nil && (input.ResourceType == nil || *p.ResourceType != *input.ResourceType) { + return false + } + + if p.Size != nil && (input.Size == nil || *p.Size != *input.Size) { + return false + } + + if p.Tier != nil && (input.Tier == nil || *p.Tier != *input.Tier) { + return false + } + + return true +} diff --git a/resource-manager/datamigration/2025-06-30/standardoperation/version.go b/resource-manager/datamigration/2025-06-30/standardoperation/version.go new file mode 100644 index 00000000000..5db5ccd9672 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/standardoperation/version.go @@ -0,0 +1,10 @@ +package standardoperation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/standardoperation/2025-06-30" +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/README.md b/resource-manager/datamigration/2025-06-30/taskresource/README.md new file mode 100644 index 00000000000..8d083558426 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/README.md @@ -0,0 +1,131 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/taskresource` Documentation + +The `taskresource` SDK allows for interaction with Azure Resource Manager `datamigration` (API Version `2025-06-30`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/datamigration/2025-06-30/taskresource" +``` + + +### Client Initialization + +```go +client := taskresource.NewTaskResourceClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `TaskResourceClient.TasksCancel` + +```go +ctx := context.TODO() +id := taskresource.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +read, err := client.TasksCancel(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `TaskResourceClient.TasksCommand` + +```go +ctx := context.TODO() +id := taskresource.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +payload := taskresource.CommandProperties{ + // ... 
+} + + +read, err := client.TasksCommand(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `TaskResourceClient.TasksCreateOrUpdate` + +```go +ctx := context.TODO() +id := taskresource.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +payload := taskresource.ProjectTask{ + // ... +} + + +read, err := client.TasksCreateOrUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `TaskResourceClient.TasksDelete` + +```go +ctx := context.TODO() +id := taskresource.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +read, err := client.TasksDelete(ctx, id, taskresource.DefaultTasksDeleteOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `TaskResourceClient.TasksGet` + +```go +ctx := context.TODO() +id := taskresource.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +read, err := client.TasksGet(ctx, id, taskresource.DefaultTasksGetOperationOptions()) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `TaskResourceClient.TasksUpdate` + +```go +ctx := context.TODO() +id := taskresource.NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + +payload := taskresource.ProjectTask{ + // ... 
+} + + +read, err := client.TasksUpdate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/datamigration/2025-06-30/taskresource/client.go b/resource-manager/datamigration/2025-06-30/taskresource/client.go new file mode 100644 index 00000000000..c5e05f923c8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/client.go @@ -0,0 +1,26 @@ +package taskresource + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TaskResourceClient struct { + Client *resourcemanager.Client +} + +func NewTaskResourceClientWithBaseURI(sdkApi sdkEnv.Api) (*TaskResourceClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "taskresource", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating TaskResourceClient: %+v", err) + } + + return &TaskResourceClient{ + Client: client, + }, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/constants.go b/resource-manager/datamigration/2025-06-30/taskresource/constants.go new file mode 100644 index 00000000000..06aa611fcee --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/constants.go @@ -0,0 +1,2105 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AuthenticationType string + +const ( + AuthenticationTypeActiveDirectoryIntegrated AuthenticationType = "ActiveDirectoryIntegrated" + AuthenticationTypeActiveDirectoryPassword AuthenticationType = "ActiveDirectoryPassword" + AuthenticationTypeNone AuthenticationType = "None" + AuthenticationTypeSqlAuthentication AuthenticationType = "SqlAuthentication" + AuthenticationTypeWindowsAuthentication AuthenticationType = "WindowsAuthentication" +) + +func PossibleValuesForAuthenticationType() []string { + return []string{ + string(AuthenticationTypeActiveDirectoryIntegrated), + string(AuthenticationTypeActiveDirectoryPassword), + string(AuthenticationTypeNone), + string(AuthenticationTypeSqlAuthentication), + string(AuthenticationTypeWindowsAuthentication), + } +} + +func (s *AuthenticationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthenticationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthenticationType(input string) (*AuthenticationType, error) { + vals := map[string]AuthenticationType{ + "activedirectoryintegrated": AuthenticationTypeActiveDirectoryIntegrated, + "activedirectorypassword": AuthenticationTypeActiveDirectoryPassword, + "none": AuthenticationTypeNone, + "sqlauthentication": AuthenticationTypeSqlAuthentication, + "windowsauthentication": AuthenticationTypeWindowsAuthentication, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthenticationType(input) + return &out, nil +} + +type BackupFileStatus string + +const ( + BackupFileStatusArrived BackupFileStatus = "Arrived" + BackupFileStatusCancelled BackupFileStatus = "Cancelled" + BackupFileStatusQueued BackupFileStatus = "Queued" + BackupFileStatusRestored 
BackupFileStatus = "Restored" + BackupFileStatusRestoring BackupFileStatus = "Restoring" + BackupFileStatusUploaded BackupFileStatus = "Uploaded" + BackupFileStatusUploading BackupFileStatus = "Uploading" +) + +func PossibleValuesForBackupFileStatus() []string { + return []string{ + string(BackupFileStatusArrived), + string(BackupFileStatusCancelled), + string(BackupFileStatusQueued), + string(BackupFileStatusRestored), + string(BackupFileStatusRestoring), + string(BackupFileStatusUploaded), + string(BackupFileStatusUploading), + } +} + +func (s *BackupFileStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupFileStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupFileStatus(input string) (*BackupFileStatus, error) { + vals := map[string]BackupFileStatus{ + "arrived": BackupFileStatusArrived, + "cancelled": BackupFileStatusCancelled, + "queued": BackupFileStatusQueued, + "restored": BackupFileStatusRestored, + "restoring": BackupFileStatusRestoring, + "uploaded": BackupFileStatusUploaded, + "uploading": BackupFileStatusUploading, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupFileStatus(input) + return &out, nil +} + +type BackupMode string + +const ( + BackupModeCreateBackup BackupMode = "CreateBackup" + BackupModeExistingBackup BackupMode = "ExistingBackup" +) + +func PossibleValuesForBackupMode() []string { + return []string{ + string(BackupModeCreateBackup), + string(BackupModeExistingBackup), + } +} + +func (s *BackupMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupMode(decoded) + 
if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupMode(input string) (*BackupMode, error) { + vals := map[string]BackupMode{ + "createbackup": BackupModeCreateBackup, + "existingbackup": BackupModeExistingBackup, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupMode(input) + return &out, nil +} + +type BackupType string + +const ( + BackupTypeDatabase BackupType = "Database" + BackupTypeDifferentialDatabase BackupType = "DifferentialDatabase" + BackupTypeDifferentialFile BackupType = "DifferentialFile" + BackupTypeDifferentialPartial BackupType = "DifferentialPartial" + BackupTypeFile BackupType = "File" + BackupTypePartial BackupType = "Partial" + BackupTypeTransactionLog BackupType = "TransactionLog" +) + +func PossibleValuesForBackupType() []string { + return []string{ + string(BackupTypeDatabase), + string(BackupTypeDifferentialDatabase), + string(BackupTypeDifferentialFile), + string(BackupTypeDifferentialPartial), + string(BackupTypeFile), + string(BackupTypePartial), + string(BackupTypeTransactionLog), + } +} + +func (s *BackupType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupType(input string) (*BackupType, error) { + vals := map[string]BackupType{ + "database": BackupTypeDatabase, + "differentialdatabase": BackupTypeDifferentialDatabase, + "differentialfile": BackupTypeDifferentialFile, + "differentialpartial": BackupTypeDifferentialPartial, + "file": BackupTypeFile, + "partial": BackupTypePartial, + "transactionlog": BackupTypeTransactionLog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return 
&v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupType(input) + return &out, nil +} + +type CommandState string + +const ( + CommandStateAccepted CommandState = "Accepted" + CommandStateFailed CommandState = "Failed" + CommandStateRunning CommandState = "Running" + CommandStateSucceeded CommandState = "Succeeded" + CommandStateUnknown CommandState = "Unknown" +) + +func PossibleValuesForCommandState() []string { + return []string{ + string(CommandStateAccepted), + string(CommandStateFailed), + string(CommandStateRunning), + string(CommandStateSucceeded), + string(CommandStateUnknown), + } +} + +func (s *CommandState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCommandState(input string) (*CommandState, error) { + vals := map[string]CommandState{ + "accepted": CommandStateAccepted, + "failed": CommandStateFailed, + "running": CommandStateRunning, + "succeeded": CommandStateSucceeded, + "unknown": CommandStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CommandState(input) + return &out, nil +} + +type CommandType string + +const ( + CommandTypeCancel CommandType = "cancel" + CommandTypeFinish CommandType = "finish" + CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete CommandType = "Migrate.SqlServer.AzureDbSqlMi.Complete" + CommandTypeMigratePointSyncPointCompletePointDatabase CommandType = "Migrate.Sync.Complete.Database" + CommandTypeRestart CommandType = "restart" +) + +func PossibleValuesForCommandType() []string { + return []string{ + string(CommandTypeCancel), + string(CommandTypeFinish), + 
string(CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete), + string(CommandTypeMigratePointSyncPointCompletePointDatabase), + string(CommandTypeRestart), + } +} + +func (s *CommandType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCommandType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCommandType(input string) (*CommandType, error) { + vals := map[string]CommandType{ + "cancel": CommandTypeCancel, + "finish": CommandTypeFinish, + "migrate.sqlserver.azuredbsqlmi.complete": CommandTypeMigratePointSqlServerPointAzureDbSqlMiPointComplete, + "migrate.sync.complete.database": CommandTypeMigratePointSyncPointCompletePointDatabase, + "restart": CommandTypeRestart, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CommandType(input) + return &out, nil +} + +type DatabaseCompatLevel string + +const ( + DatabaseCompatLevelCompatLevelEightZero DatabaseCompatLevel = "CompatLevel80" + DatabaseCompatLevelCompatLevelNineZero DatabaseCompatLevel = "CompatLevel90" + DatabaseCompatLevelCompatLevelOneFourZero DatabaseCompatLevel = "CompatLevel140" + DatabaseCompatLevelCompatLevelOneHundred DatabaseCompatLevel = "CompatLevel100" + DatabaseCompatLevelCompatLevelOneOneZero DatabaseCompatLevel = "CompatLevel110" + DatabaseCompatLevelCompatLevelOneThreeZero DatabaseCompatLevel = "CompatLevel130" + DatabaseCompatLevelCompatLevelOneTwoZero DatabaseCompatLevel = "CompatLevel120" +) + +func PossibleValuesForDatabaseCompatLevel() []string { + return []string{ + string(DatabaseCompatLevelCompatLevelEightZero), + string(DatabaseCompatLevelCompatLevelNineZero), + string(DatabaseCompatLevelCompatLevelOneFourZero), + string(DatabaseCompatLevelCompatLevelOneHundred), + 
string(DatabaseCompatLevelCompatLevelOneOneZero), + string(DatabaseCompatLevelCompatLevelOneThreeZero), + string(DatabaseCompatLevelCompatLevelOneTwoZero), + } +} + +func (s *DatabaseCompatLevel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseCompatLevel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseCompatLevel(input string) (*DatabaseCompatLevel, error) { + vals := map[string]DatabaseCompatLevel{ + "compatlevel80": DatabaseCompatLevelCompatLevelEightZero, + "compatlevel90": DatabaseCompatLevelCompatLevelNineZero, + "compatlevel140": DatabaseCompatLevelCompatLevelOneFourZero, + "compatlevel100": DatabaseCompatLevelCompatLevelOneHundred, + "compatlevel110": DatabaseCompatLevelCompatLevelOneOneZero, + "compatlevel130": DatabaseCompatLevelCompatLevelOneThreeZero, + "compatlevel120": DatabaseCompatLevelCompatLevelOneTwoZero, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseCompatLevel(input) + return &out, nil +} + +type DatabaseFileType string + +const ( + DatabaseFileTypeFilestream DatabaseFileType = "Filestream" + DatabaseFileTypeFulltext DatabaseFileType = "Fulltext" + DatabaseFileTypeLog DatabaseFileType = "Log" + DatabaseFileTypeNotSupported DatabaseFileType = "NotSupported" + DatabaseFileTypeRows DatabaseFileType = "Rows" +) + +func PossibleValuesForDatabaseFileType() []string { + return []string{ + string(DatabaseFileTypeFilestream), + string(DatabaseFileTypeFulltext), + string(DatabaseFileTypeLog), + string(DatabaseFileTypeNotSupported), + string(DatabaseFileTypeRows), + } +} + +func (s *DatabaseFileType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + 
return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseFileType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseFileType(input string) (*DatabaseFileType, error) { + vals := map[string]DatabaseFileType{ + "filestream": DatabaseFileTypeFilestream, + "fulltext": DatabaseFileTypeFulltext, + "log": DatabaseFileTypeLog, + "notsupported": DatabaseFileTypeNotSupported, + "rows": DatabaseFileTypeRows, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseFileType(input) + return &out, nil +} + +type DatabaseMigrationStage string + +const ( + DatabaseMigrationStageBackup DatabaseMigrationStage = "Backup" + DatabaseMigrationStageCompleted DatabaseMigrationStage = "Completed" + DatabaseMigrationStageFileCopy DatabaseMigrationStage = "FileCopy" + DatabaseMigrationStageInitialize DatabaseMigrationStage = "Initialize" + DatabaseMigrationStageNone DatabaseMigrationStage = "None" + DatabaseMigrationStageRestore DatabaseMigrationStage = "Restore" +) + +func PossibleValuesForDatabaseMigrationStage() []string { + return []string{ + string(DatabaseMigrationStageBackup), + string(DatabaseMigrationStageCompleted), + string(DatabaseMigrationStageFileCopy), + string(DatabaseMigrationStageInitialize), + string(DatabaseMigrationStageNone), + string(DatabaseMigrationStageRestore), + } +} + +func (s *DatabaseMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationStage(input string) (*DatabaseMigrationStage, error) { + vals := map[string]DatabaseMigrationStage{ + "backup": 
DatabaseMigrationStageBackup, + "completed": DatabaseMigrationStageCompleted, + "filecopy": DatabaseMigrationStageFileCopy, + "initialize": DatabaseMigrationStageInitialize, + "none": DatabaseMigrationStageNone, + "restore": DatabaseMigrationStageRestore, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseMigrationStage(input) + return &out, nil +} + +type DatabaseMigrationState string + +const ( + DatabaseMigrationStateCANCELLED DatabaseMigrationState = "CANCELLED" + DatabaseMigrationStateCOMPLETED DatabaseMigrationState = "COMPLETED" + DatabaseMigrationStateCUTOVERSTART DatabaseMigrationState = "CUTOVER_START" + DatabaseMigrationStateFAILED DatabaseMigrationState = "FAILED" + DatabaseMigrationStateFULLBACKUPUPLOADSTART DatabaseMigrationState = "FULL_BACKUP_UPLOAD_START" + DatabaseMigrationStateINITIAL DatabaseMigrationState = "INITIAL" + DatabaseMigrationStateLOGSHIPPINGSTART DatabaseMigrationState = "LOG_SHIPPING_START" + DatabaseMigrationStatePOSTCUTOVERCOMPLETE DatabaseMigrationState = "POST_CUTOVER_COMPLETE" + DatabaseMigrationStateUNDEFINED DatabaseMigrationState = "UNDEFINED" + DatabaseMigrationStateUPLOADLOGFILESSTART DatabaseMigrationState = "UPLOAD_LOG_FILES_START" +) + +func PossibleValuesForDatabaseMigrationState() []string { + return []string{ + string(DatabaseMigrationStateCANCELLED), + string(DatabaseMigrationStateCOMPLETED), + string(DatabaseMigrationStateCUTOVERSTART), + string(DatabaseMigrationStateFAILED), + string(DatabaseMigrationStateFULLBACKUPUPLOADSTART), + string(DatabaseMigrationStateINITIAL), + string(DatabaseMigrationStateLOGSHIPPINGSTART), + string(DatabaseMigrationStatePOSTCUTOVERCOMPLETE), + string(DatabaseMigrationStateUNDEFINED), + string(DatabaseMigrationStateUPLOADLOGFILESSTART), + } +} + +func (s *DatabaseMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); 
err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseMigrationState(input string) (*DatabaseMigrationState, error) { + vals := map[string]DatabaseMigrationState{ + "cancelled": DatabaseMigrationStateCANCELLED, + "completed": DatabaseMigrationStateCOMPLETED, + "cutover_start": DatabaseMigrationStateCUTOVERSTART, + "failed": DatabaseMigrationStateFAILED, + "full_backup_upload_start": DatabaseMigrationStateFULLBACKUPUPLOADSTART, + "initial": DatabaseMigrationStateINITIAL, + "log_shipping_start": DatabaseMigrationStateLOGSHIPPINGSTART, + "post_cutover_complete": DatabaseMigrationStatePOSTCUTOVERCOMPLETE, + "undefined": DatabaseMigrationStateUNDEFINED, + "upload_log_files_start": DatabaseMigrationStateUPLOADLOGFILESSTART, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseMigrationState(input) + return &out, nil +} + +type DatabaseState string + +const ( + DatabaseStateCopying DatabaseState = "Copying" + DatabaseStateEmergency DatabaseState = "Emergency" + DatabaseStateOffline DatabaseState = "Offline" + DatabaseStateOfflineSecondary DatabaseState = "OfflineSecondary" + DatabaseStateOnline DatabaseState = "Online" + DatabaseStateRecovering DatabaseState = "Recovering" + DatabaseStateRecoveryPending DatabaseState = "RecoveryPending" + DatabaseStateRestoring DatabaseState = "Restoring" + DatabaseStateSuspect DatabaseState = "Suspect" +) + +func PossibleValuesForDatabaseState() []string { + return []string{ + string(DatabaseStateCopying), + string(DatabaseStateEmergency), + string(DatabaseStateOffline), + string(DatabaseStateOfflineSecondary), + string(DatabaseStateOnline), + string(DatabaseStateRecovering), + string(DatabaseStateRecoveryPending), + string(DatabaseStateRestoring), + 
string(DatabaseStateSuspect), + } +} + +func (s *DatabaseState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseDatabaseState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseDatabaseState(input string) (*DatabaseState, error) { + vals := map[string]DatabaseState{ + "copying": DatabaseStateCopying, + "emergency": DatabaseStateEmergency, + "offline": DatabaseStateOffline, + "offlinesecondary": DatabaseStateOfflineSecondary, + "online": DatabaseStateOnline, + "recovering": DatabaseStateRecovering, + "recoverypending": DatabaseStateRecoveryPending, + "restoring": DatabaseStateRestoring, + "suspect": DatabaseStateSuspect, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := DatabaseState(input) + return &out, nil +} + +type LoginMigrationStage string + +const ( + LoginMigrationStageAssignRoleMembership LoginMigrationStage = "AssignRoleMembership" + LoginMigrationStageAssignRoleOwnership LoginMigrationStage = "AssignRoleOwnership" + LoginMigrationStageCompleted LoginMigrationStage = "Completed" + LoginMigrationStageEstablishObjectPermissions LoginMigrationStage = "EstablishObjectPermissions" + LoginMigrationStageEstablishServerPermissions LoginMigrationStage = "EstablishServerPermissions" + LoginMigrationStageEstablishUserMapping LoginMigrationStage = "EstablishUserMapping" + LoginMigrationStageInitialize LoginMigrationStage = "Initialize" + LoginMigrationStageLoginMigration LoginMigrationStage = "LoginMigration" + LoginMigrationStageNone LoginMigrationStage = "None" +) + +func PossibleValuesForLoginMigrationStage() []string { + return []string{ + string(LoginMigrationStageAssignRoleMembership), + string(LoginMigrationStageAssignRoleOwnership), + 
string(LoginMigrationStageCompleted), + string(LoginMigrationStageEstablishObjectPermissions), + string(LoginMigrationStageEstablishServerPermissions), + string(LoginMigrationStageEstablishUserMapping), + string(LoginMigrationStageInitialize), + string(LoginMigrationStageLoginMigration), + string(LoginMigrationStageNone), + } +} + +func (s *LoginMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLoginMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLoginMigrationStage(input string) (*LoginMigrationStage, error) { + vals := map[string]LoginMigrationStage{ + "assignrolemembership": LoginMigrationStageAssignRoleMembership, + "assignroleownership": LoginMigrationStageAssignRoleOwnership, + "completed": LoginMigrationStageCompleted, + "establishobjectpermissions": LoginMigrationStageEstablishObjectPermissions, + "establishserverpermissions": LoginMigrationStageEstablishServerPermissions, + "establishusermapping": LoginMigrationStageEstablishUserMapping, + "initialize": LoginMigrationStageInitialize, + "loginmigration": LoginMigrationStageLoginMigration, + "none": LoginMigrationStageNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LoginMigrationStage(input) + return &out, nil +} + +type LoginType string + +const ( + LoginTypeAsymmetricKey LoginType = "AsymmetricKey" + LoginTypeCertificate LoginType = "Certificate" + LoginTypeExternalGroup LoginType = "ExternalGroup" + LoginTypeExternalUser LoginType = "ExternalUser" + LoginTypeSqlLogin LoginType = "SqlLogin" + LoginTypeWindowsGroup LoginType = "WindowsGroup" + LoginTypeWindowsUser LoginType = "WindowsUser" +) + +func PossibleValuesForLoginType() []string { + return []string{ + 
string(LoginTypeAsymmetricKey), + string(LoginTypeCertificate), + string(LoginTypeExternalGroup), + string(LoginTypeExternalUser), + string(LoginTypeSqlLogin), + string(LoginTypeWindowsGroup), + string(LoginTypeWindowsUser), + } +} + +func (s *LoginType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseLoginType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseLoginType(input string) (*LoginType, error) { + vals := map[string]LoginType{ + "asymmetrickey": LoginTypeAsymmetricKey, + "certificate": LoginTypeCertificate, + "externalgroup": LoginTypeExternalGroup, + "externaluser": LoginTypeExternalUser, + "sqllogin": LoginTypeSqlLogin, + "windowsgroup": LoginTypeWindowsGroup, + "windowsuser": LoginTypeWindowsUser, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := LoginType(input) + return &out, nil +} + +type MigrationState string + +const ( + MigrationStateCompleted MigrationState = "Completed" + MigrationStateFailed MigrationState = "Failed" + MigrationStateInProgress MigrationState = "InProgress" + MigrationStateNone MigrationState = "None" + MigrationStateSkipped MigrationState = "Skipped" + MigrationStateStopped MigrationState = "Stopped" + MigrationStateWarning MigrationState = "Warning" +) + +func PossibleValuesForMigrationState() []string { + return []string{ + string(MigrationStateCompleted), + string(MigrationStateFailed), + string(MigrationStateInProgress), + string(MigrationStateNone), + string(MigrationStateSkipped), + string(MigrationStateStopped), + string(MigrationStateWarning), + } +} + +func (s *MigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return 
fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMigrationState(input string) (*MigrationState, error) { + vals := map[string]MigrationState{ + "completed": MigrationStateCompleted, + "failed": MigrationStateFailed, + "inprogress": MigrationStateInProgress, + "none": MigrationStateNone, + "skipped": MigrationStateSkipped, + "stopped": MigrationStateStopped, + "warning": MigrationStateWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MigrationState(input) + return &out, nil +} + +type MigrationStatus string + +const ( + MigrationStatusCompleted MigrationStatus = "Completed" + MigrationStatusCompletedWithWarnings MigrationStatus = "CompletedWithWarnings" + MigrationStatusConfigured MigrationStatus = "Configured" + MigrationStatusConnecting MigrationStatus = "Connecting" + MigrationStatusDefault MigrationStatus = "Default" + MigrationStatusError MigrationStatus = "Error" + MigrationStatusRunning MigrationStatus = "Running" + MigrationStatusSelectLogins MigrationStatus = "SelectLogins" + MigrationStatusSourceAndTargetSelected MigrationStatus = "SourceAndTargetSelected" + MigrationStatusStopped MigrationStatus = "Stopped" +) + +func PossibleValuesForMigrationStatus() []string { + return []string{ + string(MigrationStatusCompleted), + string(MigrationStatusCompletedWithWarnings), + string(MigrationStatusConfigured), + string(MigrationStatusConnecting), + string(MigrationStatusDefault), + string(MigrationStatusError), + string(MigrationStatusRunning), + string(MigrationStatusSelectLogins), + string(MigrationStatusSourceAndTargetSelected), + string(MigrationStatusStopped), + } +} + +func (s *MigrationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != 
nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMigrationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMigrationStatus(input string) (*MigrationStatus, error) { + vals := map[string]MigrationStatus{ + "completed": MigrationStatusCompleted, + "completedwithwarnings": MigrationStatusCompletedWithWarnings, + "configured": MigrationStatusConfigured, + "connecting": MigrationStatusConnecting, + "default": MigrationStatusDefault, + "error": MigrationStatusError, + "running": MigrationStatusRunning, + "selectlogins": MigrationStatusSelectLogins, + "sourceandtargetselected": MigrationStatusSourceAndTargetSelected, + "stopped": MigrationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MigrationStatus(input) + return &out, nil +} + +type MongoDbClusterType string + +const ( + MongoDbClusterTypeBlobContainer MongoDbClusterType = "BlobContainer" + MongoDbClusterTypeCosmosDb MongoDbClusterType = "CosmosDb" + MongoDbClusterTypeMongoDb MongoDbClusterType = "MongoDb" +) + +func PossibleValuesForMongoDbClusterType() []string { + return []string{ + string(MongoDbClusterTypeBlobContainer), + string(MongoDbClusterTypeCosmosDb), + string(MongoDbClusterTypeMongoDb), + } +} + +func (s *MongoDbClusterType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbClusterType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbClusterType(input string) (*MongoDbClusterType, error) { + vals := map[string]MongoDbClusterType{ + "blobcontainer": MongoDbClusterTypeBlobContainer, + "cosmosdb": MongoDbClusterTypeCosmosDb, + "mongodb": MongoDbClusterTypeMongoDb, 
+ } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbClusterType(input) + return &out, nil +} + +type MongoDbErrorType string + +const ( + MongoDbErrorTypeError MongoDbErrorType = "Error" + MongoDbErrorTypeValidationError MongoDbErrorType = "ValidationError" + MongoDbErrorTypeWarning MongoDbErrorType = "Warning" +) + +func PossibleValuesForMongoDbErrorType() []string { + return []string{ + string(MongoDbErrorTypeError), + string(MongoDbErrorTypeValidationError), + string(MongoDbErrorTypeWarning), + } +} + +func (s *MongoDbErrorType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbErrorType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbErrorType(input string) (*MongoDbErrorType, error) { + vals := map[string]MongoDbErrorType{ + "error": MongoDbErrorTypeError, + "validationerror": MongoDbErrorTypeValidationError, + "warning": MongoDbErrorTypeWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbErrorType(input) + return &out, nil +} + +type MongoDbMigrationState string + +const ( + MongoDbMigrationStateCanceled MongoDbMigrationState = "Canceled" + MongoDbMigrationStateComplete MongoDbMigrationState = "Complete" + MongoDbMigrationStateCopying MongoDbMigrationState = "Copying" + MongoDbMigrationStateFailed MongoDbMigrationState = "Failed" + MongoDbMigrationStateFinalizing MongoDbMigrationState = "Finalizing" + MongoDbMigrationStateInitialReplay MongoDbMigrationState = "InitialReplay" + MongoDbMigrationStateInitializing MongoDbMigrationState = "Initializing" + MongoDbMigrationStateNotStarted MongoDbMigrationState = "NotStarted" + 
MongoDbMigrationStateReplaying MongoDbMigrationState = "Replaying" + MongoDbMigrationStateRestarting MongoDbMigrationState = "Restarting" + MongoDbMigrationStateValidatingInput MongoDbMigrationState = "ValidatingInput" +) + +func PossibleValuesForMongoDbMigrationState() []string { + return []string{ + string(MongoDbMigrationStateCanceled), + string(MongoDbMigrationStateComplete), + string(MongoDbMigrationStateCopying), + string(MongoDbMigrationStateFailed), + string(MongoDbMigrationStateFinalizing), + string(MongoDbMigrationStateInitialReplay), + string(MongoDbMigrationStateInitializing), + string(MongoDbMigrationStateNotStarted), + string(MongoDbMigrationStateReplaying), + string(MongoDbMigrationStateRestarting), + string(MongoDbMigrationStateValidatingInput), + } +} + +func (s *MongoDbMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbMigrationState(input string) (*MongoDbMigrationState, error) { + vals := map[string]MongoDbMigrationState{ + "canceled": MongoDbMigrationStateCanceled, + "complete": MongoDbMigrationStateComplete, + "copying": MongoDbMigrationStateCopying, + "failed": MongoDbMigrationStateFailed, + "finalizing": MongoDbMigrationStateFinalizing, + "initialreplay": MongoDbMigrationStateInitialReplay, + "initializing": MongoDbMigrationStateInitializing, + "notstarted": MongoDbMigrationStateNotStarted, + "replaying": MongoDbMigrationStateReplaying, + "restarting": MongoDbMigrationStateRestarting, + "validatinginput": MongoDbMigrationStateValidatingInput, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbMigrationState(input) + return &out, nil +} + 
+type MongoDbReplication string + +const ( + MongoDbReplicationContinuous MongoDbReplication = "Continuous" + MongoDbReplicationDisabled MongoDbReplication = "Disabled" + MongoDbReplicationOneTime MongoDbReplication = "OneTime" +) + +func PossibleValuesForMongoDbReplication() []string { + return []string{ + string(MongoDbReplicationContinuous), + string(MongoDbReplicationDisabled), + string(MongoDbReplicationOneTime), + } +} + +func (s *MongoDbReplication) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbReplication(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMongoDbReplication(input string) (*MongoDbReplication, error) { + vals := map[string]MongoDbReplication{ + "continuous": MongoDbReplicationContinuous, + "disabled": MongoDbReplicationDisabled, + "onetime": MongoDbReplicationOneTime, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbReplication(input) + return &out, nil +} + +type MongoDbShardKeyOrder string + +const ( + MongoDbShardKeyOrderForward MongoDbShardKeyOrder = "Forward" + MongoDbShardKeyOrderHashed MongoDbShardKeyOrder = "Hashed" + MongoDbShardKeyOrderReverse MongoDbShardKeyOrder = "Reverse" +) + +func PossibleValuesForMongoDbShardKeyOrder() []string { + return []string{ + string(MongoDbShardKeyOrderForward), + string(MongoDbShardKeyOrderHashed), + string(MongoDbShardKeyOrderReverse), + } +} + +func (s *MongoDbShardKeyOrder) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMongoDbShardKeyOrder(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + 
return nil +} + +func parseMongoDbShardKeyOrder(input string) (*MongoDbShardKeyOrder, error) { + vals := map[string]MongoDbShardKeyOrder{ + "forward": MongoDbShardKeyOrderForward, + "hashed": MongoDbShardKeyOrderHashed, + "reverse": MongoDbShardKeyOrderReverse, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MongoDbShardKeyOrder(input) + return &out, nil +} + +type MySqlTargetPlatformType string + +const ( + MySqlTargetPlatformTypeAzureDbForMySQL MySqlTargetPlatformType = "AzureDbForMySQL" + MySqlTargetPlatformTypeSqlServer MySqlTargetPlatformType = "SqlServer" +) + +func PossibleValuesForMySqlTargetPlatformType() []string { + return []string{ + string(MySqlTargetPlatformTypeAzureDbForMySQL), + string(MySqlTargetPlatformTypeSqlServer), + } +} + +func (s *MySqlTargetPlatformType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMySqlTargetPlatformType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMySqlTargetPlatformType(input string) (*MySqlTargetPlatformType, error) { + vals := map[string]MySqlTargetPlatformType{ + "azuredbformysql": MySqlTargetPlatformTypeAzureDbForMySQL, + "sqlserver": MySqlTargetPlatformTypeSqlServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MySqlTargetPlatformType(input) + return &out, nil +} + +type ObjectType string + +const ( + ObjectTypeFunction ObjectType = "Function" + ObjectTypeStoredProcedures ObjectType = "StoredProcedures" + ObjectTypeTable ObjectType = "Table" + ObjectTypeUser ObjectType = "User" + ObjectTypeView ObjectType = "View" +) + +func PossibleValuesForObjectType() []string { + return []string{ + 
string(ObjectTypeFunction), + string(ObjectTypeStoredProcedures), + string(ObjectTypeTable), + string(ObjectTypeUser), + string(ObjectTypeView), + } +} + +func (s *ObjectType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseObjectType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseObjectType(input string) (*ObjectType, error) { + vals := map[string]ObjectType{ + "function": ObjectTypeFunction, + "storedprocedures": ObjectTypeStoredProcedures, + "table": ObjectTypeTable, + "user": ObjectTypeUser, + "view": ObjectTypeView, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ObjectType(input) + return &out, nil +} + +type ReplicateMigrationState string + +const ( + ReplicateMigrationStateACTIONREQUIRED ReplicateMigrationState = "ACTION_REQUIRED" + ReplicateMigrationStateCOMPLETE ReplicateMigrationState = "COMPLETE" + ReplicateMigrationStateFAILED ReplicateMigrationState = "FAILED" + ReplicateMigrationStatePENDING ReplicateMigrationState = "PENDING" + ReplicateMigrationStateUNDEFINED ReplicateMigrationState = "UNDEFINED" + ReplicateMigrationStateVALIDATING ReplicateMigrationState = "VALIDATING" +) + +func PossibleValuesForReplicateMigrationState() []string { + return []string{ + string(ReplicateMigrationStateACTIONREQUIRED), + string(ReplicateMigrationStateCOMPLETE), + string(ReplicateMigrationStateFAILED), + string(ReplicateMigrationStatePENDING), + string(ReplicateMigrationStateUNDEFINED), + string(ReplicateMigrationStateVALIDATING), + } +} + +func (s *ReplicateMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := 
parseReplicateMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicateMigrationState(input string) (*ReplicateMigrationState, error) { + vals := map[string]ReplicateMigrationState{ + "action_required": ReplicateMigrationStateACTIONREQUIRED, + "complete": ReplicateMigrationStateCOMPLETE, + "failed": ReplicateMigrationStateFAILED, + "pending": ReplicateMigrationStatePENDING, + "undefined": ReplicateMigrationStateUNDEFINED, + "validating": ReplicateMigrationStateVALIDATING, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicateMigrationState(input) + return &out, nil +} + +type ResultType string + +const ( + ResultTypeCollection ResultType = "Collection" + ResultTypeDatabase ResultType = "Database" + ResultTypeMigration ResultType = "Migration" +) + +func PossibleValuesForResultType() []string { + return []string{ + string(ResultTypeCollection), + string(ResultTypeDatabase), + string(ResultTypeMigration), + } +} + +func (s *ResultType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResultType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResultType(input string) (*ResultType, error) { + vals := map[string]ResultType{ + "collection": ResultTypeCollection, + "database": ResultTypeDatabase, + "migration": ResultTypeMigration, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ResultType(input) + return &out, nil +} + +type ScenarioSource string + +const ( + ScenarioSourceAccess ScenarioSource = "Access" + ScenarioSourceDBTwo ScenarioSource = "DB2" + 
ScenarioSourceMongoDB ScenarioSource = "MongoDB" + ScenarioSourceMySQL ScenarioSource = "MySQL" + ScenarioSourceMySQLRDS ScenarioSource = "MySQLRDS" + ScenarioSourceOracle ScenarioSource = "Oracle" + ScenarioSourcePostgreSQL ScenarioSource = "PostgreSQL" + ScenarioSourcePostgreSQLRDS ScenarioSource = "PostgreSQLRDS" + ScenarioSourceSQL ScenarioSource = "SQL" + ScenarioSourceSQLRDS ScenarioSource = "SQLRDS" + ScenarioSourceSybase ScenarioSource = "Sybase" +) + +func PossibleValuesForScenarioSource() []string { + return []string{ + string(ScenarioSourceAccess), + string(ScenarioSourceDBTwo), + string(ScenarioSourceMongoDB), + string(ScenarioSourceMySQL), + string(ScenarioSourceMySQLRDS), + string(ScenarioSourceOracle), + string(ScenarioSourcePostgreSQL), + string(ScenarioSourcePostgreSQLRDS), + string(ScenarioSourceSQL), + string(ScenarioSourceSQLRDS), + string(ScenarioSourceSybase), + } +} + +func (s *ScenarioSource) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioSource(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioSource(input string) (*ScenarioSource, error) { + vals := map[string]ScenarioSource{ + "access": ScenarioSourceAccess, + "db2": ScenarioSourceDBTwo, + "mongodb": ScenarioSourceMongoDB, + "mysql": ScenarioSourceMySQL, + "mysqlrds": ScenarioSourceMySQLRDS, + "oracle": ScenarioSourceOracle, + "postgresql": ScenarioSourcePostgreSQL, + "postgresqlrds": ScenarioSourcePostgreSQLRDS, + "sql": ScenarioSourceSQL, + "sqlrds": ScenarioSourceSQLRDS, + "sybase": ScenarioSourceSybase, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioSource(input) + return &out, nil +} + +type ScenarioTarget string + +const ( + 
ScenarioTargetAzureDBForMySql ScenarioTarget = "AzureDBForMySql" + ScenarioTargetAzureDBForPostgresSQL ScenarioTarget = "AzureDBForPostgresSQL" + ScenarioTargetMongoDB ScenarioTarget = "MongoDB" + ScenarioTargetSQLDB ScenarioTarget = "SQLDB" + ScenarioTargetSQLDW ScenarioTarget = "SQLDW" + ScenarioTargetSQLMI ScenarioTarget = "SQLMI" + ScenarioTargetSQLServer ScenarioTarget = "SQLServer" +) + +func PossibleValuesForScenarioTarget() []string { + return []string{ + string(ScenarioTargetAzureDBForMySql), + string(ScenarioTargetAzureDBForPostgresSQL), + string(ScenarioTargetMongoDB), + string(ScenarioTargetSQLDB), + string(ScenarioTargetSQLDW), + string(ScenarioTargetSQLMI), + string(ScenarioTargetSQLServer), + } +} + +func (s *ScenarioTarget) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseScenarioTarget(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseScenarioTarget(input string) (*ScenarioTarget, error) { + vals := map[string]ScenarioTarget{ + "azuredbformysql": ScenarioTargetAzureDBForMySql, + "azuredbforpostgressql": ScenarioTargetAzureDBForPostgresSQL, + "mongodb": ScenarioTargetMongoDB, + "sqldb": ScenarioTargetSQLDB, + "sqldw": ScenarioTargetSQLDW, + "sqlmi": ScenarioTargetSQLMI, + "sqlserver": ScenarioTargetSQLServer, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ScenarioTarget(input) + return &out, nil +} + +type ServerLevelPermissionsGroup string + +const ( + ServerLevelPermissionsGroupDefault ServerLevelPermissionsGroup = "Default" + ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL ServerLevelPermissionsGroup = "MigrationFromMySQLToAzureDBForMySQL" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB 
ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureDB" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureMI" + ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM ServerLevelPermissionsGroup = "MigrationFromSqlServerToAzureVM" +) + +func PossibleValuesForServerLevelPermissionsGroup() []string { + return []string{ + string(ServerLevelPermissionsGroupDefault), + string(ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI), + string(ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM), + } +} + +func (s *ServerLevelPermissionsGroup) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServerLevelPermissionsGroup(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServerLevelPermissionsGroup(input string) (*ServerLevelPermissionsGroup, error) { + vals := map[string]ServerLevelPermissionsGroup{ + "default": ServerLevelPermissionsGroupDefault, + "migrationfrommysqltoazuredbformysql": ServerLevelPermissionsGroupMigrationFromMySQLToAzureDBForMySQL, + "migrationfromsqlservertoazuredb": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureDB, + "migrationfromsqlservertoazuremi": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureMI, + "migrationfromsqlservertoazurevm": ServerLevelPermissionsGroupMigrationFromSqlServerToAzureVM, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServerLevelPermissionsGroup(input) + return &out, nil +} + +type Severity string + +const ( + SeverityError Severity = "Error" + SeverityMessage Severity 
= "Message" + SeverityWarning Severity = "Warning" +) + +func PossibleValuesForSeverity() []string { + return []string{ + string(SeverityError), + string(SeverityMessage), + string(SeverityWarning), + } +} + +func (s *Severity) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSeverity(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSeverity(input string) (*Severity, error) { + vals := map[string]Severity{ + "error": SeverityError, + "message": SeverityMessage, + "warning": SeverityWarning, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Severity(input) + return &out, nil +} + +type SqlSourcePlatform string + +const ( + SqlSourcePlatformSqlOnPrem SqlSourcePlatform = "SqlOnPrem" +) + +func PossibleValuesForSqlSourcePlatform() []string { + return []string{ + string(SqlSourcePlatformSqlOnPrem), + } +} + +func (s *SqlSourcePlatform) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSqlSourcePlatform(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSqlSourcePlatform(input string) (*SqlSourcePlatform, error) { + vals := map[string]SqlSourcePlatform{ + "sqlonprem": SqlSourcePlatformSqlOnPrem, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SqlSourcePlatform(input) + return &out, nil +} + +type SsisMigrationOverwriteOption string + +const ( + SsisMigrationOverwriteOptionIgnore SsisMigrationOverwriteOption = "Ignore" + SsisMigrationOverwriteOptionOverwrite 
SsisMigrationOverwriteOption = "Overwrite" +) + +func PossibleValuesForSsisMigrationOverwriteOption() []string { + return []string{ + string(SsisMigrationOverwriteOptionIgnore), + string(SsisMigrationOverwriteOptionOverwrite), + } +} + +func (s *SsisMigrationOverwriteOption) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisMigrationOverwriteOption(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisMigrationOverwriteOption(input string) (*SsisMigrationOverwriteOption, error) { + vals := map[string]SsisMigrationOverwriteOption{ + "ignore": SsisMigrationOverwriteOptionIgnore, + "overwrite": SsisMigrationOverwriteOptionOverwrite, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisMigrationOverwriteOption(input) + return &out, nil +} + +type SsisMigrationStage string + +const ( + SsisMigrationStageCompleted SsisMigrationStage = "Completed" + SsisMigrationStageInProgress SsisMigrationStage = "InProgress" + SsisMigrationStageInitialize SsisMigrationStage = "Initialize" + SsisMigrationStageNone SsisMigrationStage = "None" +) + +func PossibleValuesForSsisMigrationStage() []string { + return []string{ + string(SsisMigrationStageCompleted), + string(SsisMigrationStageInProgress), + string(SsisMigrationStageInitialize), + string(SsisMigrationStageNone), + } +} + +func (s *SsisMigrationStage) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisMigrationStage(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisMigrationStage(input string) 
(*SsisMigrationStage, error) { + vals := map[string]SsisMigrationStage{ + "completed": SsisMigrationStageCompleted, + "inprogress": SsisMigrationStageInProgress, + "initialize": SsisMigrationStageInitialize, + "none": SsisMigrationStageNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisMigrationStage(input) + return &out, nil +} + +type SsisStoreType string + +const ( + SsisStoreTypeSsisCatalog SsisStoreType = "SsisCatalog" +) + +func PossibleValuesForSsisStoreType() []string { + return []string{ + string(SsisStoreTypeSsisCatalog), + } +} + +func (s *SsisStoreType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSsisStoreType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSsisStoreType(input string) (*SsisStoreType, error) { + vals := map[string]SsisStoreType{ + "ssiscatalog": SsisStoreTypeSsisCatalog, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SsisStoreType(input) + return &out, nil +} + +type SyncDatabaseMigrationReportingState string + +const ( + SyncDatabaseMigrationReportingStateBACKUPCOMPLETED SyncDatabaseMigrationReportingState = "BACKUP_COMPLETED" + SyncDatabaseMigrationReportingStateBACKUPINPROGRESS SyncDatabaseMigrationReportingState = "BACKUP_IN_PROGRESS" + SyncDatabaseMigrationReportingStateCANCELLED SyncDatabaseMigrationReportingState = "CANCELLED" + SyncDatabaseMigrationReportingStateCANCELLING SyncDatabaseMigrationReportingState = "CANCELLING" + SyncDatabaseMigrationReportingStateCOMPLETE SyncDatabaseMigrationReportingState = "COMPLETE" + SyncDatabaseMigrationReportingStateCOMPLETING SyncDatabaseMigrationReportingState = 
"COMPLETING" + SyncDatabaseMigrationReportingStateCONFIGURING SyncDatabaseMigrationReportingState = "CONFIGURING" + SyncDatabaseMigrationReportingStateFAILED SyncDatabaseMigrationReportingState = "FAILED" + SyncDatabaseMigrationReportingStateINITIALIAZING SyncDatabaseMigrationReportingState = "INITIALIAZING" + SyncDatabaseMigrationReportingStateREADYTOCOMPLETE SyncDatabaseMigrationReportingState = "READY_TO_COMPLETE" + SyncDatabaseMigrationReportingStateRESTORECOMPLETED SyncDatabaseMigrationReportingState = "RESTORE_COMPLETED" + SyncDatabaseMigrationReportingStateRESTOREINPROGRESS SyncDatabaseMigrationReportingState = "RESTORE_IN_PROGRESS" + SyncDatabaseMigrationReportingStateRUNNING SyncDatabaseMigrationReportingState = "RUNNING" + SyncDatabaseMigrationReportingStateSTARTING SyncDatabaseMigrationReportingState = "STARTING" + SyncDatabaseMigrationReportingStateUNDEFINED SyncDatabaseMigrationReportingState = "UNDEFINED" + SyncDatabaseMigrationReportingStateVALIDATING SyncDatabaseMigrationReportingState = "VALIDATING" + SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE SyncDatabaseMigrationReportingState = "VALIDATION_COMPLETE" + SyncDatabaseMigrationReportingStateVALIDATIONFAILED SyncDatabaseMigrationReportingState = "VALIDATION_FAILED" +) + +func PossibleValuesForSyncDatabaseMigrationReportingState() []string { + return []string{ + string(SyncDatabaseMigrationReportingStateBACKUPCOMPLETED), + string(SyncDatabaseMigrationReportingStateBACKUPINPROGRESS), + string(SyncDatabaseMigrationReportingStateCANCELLED), + string(SyncDatabaseMigrationReportingStateCANCELLING), + string(SyncDatabaseMigrationReportingStateCOMPLETE), + string(SyncDatabaseMigrationReportingStateCOMPLETING), + string(SyncDatabaseMigrationReportingStateCONFIGURING), + string(SyncDatabaseMigrationReportingStateFAILED), + string(SyncDatabaseMigrationReportingStateINITIALIAZING), + string(SyncDatabaseMigrationReportingStateREADYTOCOMPLETE), + 
string(SyncDatabaseMigrationReportingStateRESTORECOMPLETED), + string(SyncDatabaseMigrationReportingStateRESTOREINPROGRESS), + string(SyncDatabaseMigrationReportingStateRUNNING), + string(SyncDatabaseMigrationReportingStateSTARTING), + string(SyncDatabaseMigrationReportingStateUNDEFINED), + string(SyncDatabaseMigrationReportingStateVALIDATING), + string(SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE), + string(SyncDatabaseMigrationReportingStateVALIDATIONFAILED), + } +} + +func (s *SyncDatabaseMigrationReportingState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSyncDatabaseMigrationReportingState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSyncDatabaseMigrationReportingState(input string) (*SyncDatabaseMigrationReportingState, error) { + vals := map[string]SyncDatabaseMigrationReportingState{ + "backup_completed": SyncDatabaseMigrationReportingStateBACKUPCOMPLETED, + "backup_in_progress": SyncDatabaseMigrationReportingStateBACKUPINPROGRESS, + "cancelled": SyncDatabaseMigrationReportingStateCANCELLED, + "cancelling": SyncDatabaseMigrationReportingStateCANCELLING, + "complete": SyncDatabaseMigrationReportingStateCOMPLETE, + "completing": SyncDatabaseMigrationReportingStateCOMPLETING, + "configuring": SyncDatabaseMigrationReportingStateCONFIGURING, + "failed": SyncDatabaseMigrationReportingStateFAILED, + "initialiazing": SyncDatabaseMigrationReportingStateINITIALIAZING, + "ready_to_complete": SyncDatabaseMigrationReportingStateREADYTOCOMPLETE, + "restore_completed": SyncDatabaseMigrationReportingStateRESTORECOMPLETED, + "restore_in_progress": SyncDatabaseMigrationReportingStateRESTOREINPROGRESS, + "running": SyncDatabaseMigrationReportingStateRUNNING, + "starting": SyncDatabaseMigrationReportingStateSTARTING, + "undefined": 
SyncDatabaseMigrationReportingStateUNDEFINED, + "validating": SyncDatabaseMigrationReportingStateVALIDATING, + "validation_complete": SyncDatabaseMigrationReportingStateVALIDATIONCOMPLETE, + "validation_failed": SyncDatabaseMigrationReportingStateVALIDATIONFAILED, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SyncDatabaseMigrationReportingState(input) + return &out, nil +} + +type SyncTableMigrationState string + +const ( + SyncTableMigrationStateBEFORELOAD SyncTableMigrationState = "BEFORE_LOAD" + SyncTableMigrationStateCANCELED SyncTableMigrationState = "CANCELED" + SyncTableMigrationStateCOMPLETED SyncTableMigrationState = "COMPLETED" + SyncTableMigrationStateERROR SyncTableMigrationState = "ERROR" + SyncTableMigrationStateFAILED SyncTableMigrationState = "FAILED" + SyncTableMigrationStateFULLLOAD SyncTableMigrationState = "FULL_LOAD" +) + +func PossibleValuesForSyncTableMigrationState() []string { + return []string{ + string(SyncTableMigrationStateBEFORELOAD), + string(SyncTableMigrationStateCANCELED), + string(SyncTableMigrationStateCOMPLETED), + string(SyncTableMigrationStateERROR), + string(SyncTableMigrationStateFAILED), + string(SyncTableMigrationStateFULLLOAD), + } +} + +func (s *SyncTableMigrationState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSyncTableMigrationState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSyncTableMigrationState(input string) (*SyncTableMigrationState, error) { + vals := map[string]SyncTableMigrationState{ + "before_load": SyncTableMigrationStateBEFORELOAD, + "canceled": SyncTableMigrationStateCANCELED, + "completed": SyncTableMigrationStateCOMPLETED, + "error": SyncTableMigrationStateERROR, + "failed": 
SyncTableMigrationStateFAILED, + "full_load": SyncTableMigrationStateFULLLOAD, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SyncTableMigrationState(input) + return &out, nil +} + +type TaskState string + +const ( + TaskStateCanceled TaskState = "Canceled" + TaskStateFailed TaskState = "Failed" + TaskStateFailedInputValidation TaskState = "FailedInputValidation" + TaskStateFaulted TaskState = "Faulted" + TaskStateQueued TaskState = "Queued" + TaskStateRunning TaskState = "Running" + TaskStateSucceeded TaskState = "Succeeded" + TaskStateUnknown TaskState = "Unknown" +) + +func PossibleValuesForTaskState() []string { + return []string{ + string(TaskStateCanceled), + string(TaskStateFailed), + string(TaskStateFailedInputValidation), + string(TaskStateFaulted), + string(TaskStateQueued), + string(TaskStateRunning), + string(TaskStateSucceeded), + string(TaskStateUnknown), + } +} + +func (s *TaskState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTaskState(input string) (*TaskState, error) { + vals := map[string]TaskState{ + "canceled": TaskStateCanceled, + "failed": TaskStateFailed, + "failedinputvalidation": TaskStateFailedInputValidation, + "faulted": TaskStateFaulted, + "queued": TaskStateQueued, + "running": TaskStateRunning, + "succeeded": TaskStateSucceeded, + "unknown": TaskStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskState(input) + return &out, nil +} + +type TaskType string + +const ( + TaskTypeConnectPointMongoDb TaskType = "Connect.MongoDb" + 
TaskTypeConnectToSourcePointMySql TaskType = "ConnectToSource.MySql" + TaskTypeConnectToSourcePointOraclePointSync TaskType = "ConnectToSource.Oracle.Sync" + TaskTypeConnectToSourcePointPostgreSqlPointSync TaskType = "ConnectToSource.PostgreSql.Sync" + TaskTypeConnectToSourcePointSqlServer TaskType = "ConnectToSource.SqlServer" + TaskTypeConnectToSourcePointSqlServerPointSync TaskType = "ConnectToSource.SqlServer.Sync" + TaskTypeConnectToTargetPointAzureDbForMySql TaskType = "ConnectToTarget.AzureDbForMySql" + TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointAzureSqlDbMI TaskType = "ConnectToTarget.AzureSqlDbMI" + TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS TaskType = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync TaskType = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeConnectToTargetPointSqlDb TaskType = "ConnectToTarget.SqlDb" + TaskTypeConnectToTargetPointSqlDbPointSync TaskType = "ConnectToTarget.SqlDb.Sync" + TaskTypeGetTDECertificatesPointSql TaskType = "GetTDECertificates.Sql" + TaskTypeGetUserTablesMySql TaskType = "GetUserTablesMySql" + TaskTypeGetUserTablesOracle TaskType = "GetUserTablesOracle" + TaskTypeGetUserTablesPointAzureSqlDbPointSync TaskType = "GetUserTables.AzureSqlDb.Sync" + TaskTypeGetUserTablesPointSql TaskType = "GetUserTables.Sql" + TaskTypeGetUserTablesPostgreSql TaskType = "GetUserTablesPostgreSql" + TaskTypeMigratePointMongoDb TaskType = "Migrate.MongoDb" + TaskTypeMigratePointMySqlPointAzureDbForMySql TaskType = "Migrate.MySql.AzureDbForMySql" + TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync TaskType = "Migrate.MySql.AzureDbForMySql.Sync" + TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync TaskType = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo TaskType = 
"Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + TaskTypeMigratePointSqlServerPointAzureSqlDbMI TaskType = "Migrate.SqlServer.AzureSqlDbMI" + TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync TaskType = "Migrate.SqlServer.AzureSqlDb.Sync" + TaskTypeMigratePointSqlServerPointSqlDb TaskType = "Migrate.SqlServer.SqlDb" + TaskTypeMigratePointSsis TaskType = "Migrate.Ssis" + TaskTypeMigrateSchemaSqlServerSqlDb TaskType = "MigrateSchemaSqlServerSqlDb" + TaskTypeServicePointCheckPointOCI TaskType = "Service.Check.OCI" + TaskTypeServicePointInstallPointOCI TaskType = "Service.Install.OCI" + TaskTypeServicePointUploadPointOCI TaskType = "Service.Upload.OCI" + TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS TaskType = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync TaskType = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + TaskTypeValidatePointMongoDb TaskType = "Validate.MongoDb" + TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync TaskType = "Validate.Oracle.AzureDbPostgreSql.Sync" +) + +func PossibleValuesForTaskType() []string { + return []string{ + string(TaskTypeConnectPointMongoDb), + string(TaskTypeConnectToSourcePointMySql), + string(TaskTypeConnectToSourcePointOraclePointSync), + string(TaskTypeConnectToSourcePointPostgreSqlPointSync), + string(TaskTypeConnectToSourcePointSqlServer), + string(TaskTypeConnectToSourcePointSqlServerPointSync), + string(TaskTypeConnectToTargetPointAzureDbForMySql), + string(TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointAzureSqlDbMI), + string(TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS), + 
string(TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeConnectToTargetPointSqlDb), + string(TaskTypeConnectToTargetPointSqlDbPointSync), + string(TaskTypeGetTDECertificatesPointSql), + string(TaskTypeGetUserTablesMySql), + string(TaskTypeGetUserTablesOracle), + string(TaskTypeGetUserTablesPointAzureSqlDbPointSync), + string(TaskTypeGetUserTablesPointSql), + string(TaskTypeGetUserTablesPostgreSql), + string(TaskTypeMigratePointMongoDb), + string(TaskTypeMigratePointMySqlPointAzureDbForMySql), + string(TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync), + string(TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync), + string(TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMI), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync), + string(TaskTypeMigratePointSqlServerPointSqlDb), + string(TaskTypeMigratePointSsis), + string(TaskTypeMigrateSchemaSqlServerSqlDb), + string(TaskTypeServicePointCheckPointOCI), + string(TaskTypeServicePointInstallPointOCI), + string(TaskTypeServicePointUploadPointOCI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI), + string(TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS), + string(TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync), + string(TaskTypeValidatePointMongoDb), + string(TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync), + } +} + +func (s *TaskType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTaskType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTaskType(input string) (*TaskType, error) { + vals := map[string]TaskType{ + 
"connect.mongodb": TaskTypeConnectPointMongoDb, + "connecttosource.mysql": TaskTypeConnectToSourcePointMySql, + "connecttosource.oracle.sync": TaskTypeConnectToSourcePointOraclePointSync, + "connecttosource.postgresql.sync": TaskTypeConnectToSourcePointPostgreSqlPointSync, + "connecttosource.sqlserver": TaskTypeConnectToSourcePointSqlServer, + "connecttosource.sqlserver.sync": TaskTypeConnectToSourcePointSqlServerPointSync, + "connecttotarget.azuredbformysql": TaskTypeConnectToTargetPointAzureDbForMySql, + "connecttotarget.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointAzureDbForPostgreSqlPointSync, + "connecttotarget.azuresqldbmi": TaskTypeConnectToTargetPointAzureSqlDbMI, + "connecttotarget.azuresqldbmi.sync.lrs": TaskTypeConnectToTargetPointAzureSqlDbMIPointSyncPointLRS, + "connecttotarget.oracle.azuredbforpostgresql.sync": TaskTypeConnectToTargetPointOraclePointAzureDbForPostgreSqlPointSync, + "connecttotarget.sqldb": TaskTypeConnectToTargetPointSqlDb, + "connecttotarget.sqldb.sync": TaskTypeConnectToTargetPointSqlDbPointSync, + "gettdecertificates.sql": TaskTypeGetTDECertificatesPointSql, + "getusertablesmysql": TaskTypeGetUserTablesMySql, + "getusertablesoracle": TaskTypeGetUserTablesOracle, + "getusertables.azuresqldb.sync": TaskTypeGetUserTablesPointAzureSqlDbPointSync, + "getusertables.sql": TaskTypeGetUserTablesPointSql, + "getusertablespostgresql": TaskTypeGetUserTablesPostgreSql, + "migrate.mongodb": TaskTypeMigratePointMongoDb, + "migrate.mysql.azuredbformysql": TaskTypeMigratePointMySqlPointAzureDbForMySql, + "migrate.mysql.azuredbformysql.sync": TaskTypeMigratePointMySqlPointAzureDbForMySqlPointSync, + "migrate.oracle.azuredbforpostgresql.sync": TaskTypeMigratePointOraclePointAzureDbForPostgreSqlPointSync, + "migrate.postgresql.azuredbforpostgresql.syncv2": TaskTypeMigratePointPostgreSqlPointAzureDbForPostgreSqlPointSyncVTwo, + "migrate.sqlserver.azuresqldbmi": TaskTypeMigratePointSqlServerPointAzureSqlDbMI, + 
"migrate.sqlserver.azuresqldbmi.sync.lrs": TaskTypeMigratePointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "migrate.sqlserver.azuresqldb.sync": TaskTypeMigratePointSqlServerPointAzureSqlDbPointSync, + "migrate.sqlserver.sqldb": TaskTypeMigratePointSqlServerPointSqlDb, + "migrate.ssis": TaskTypeMigratePointSsis, + "migrateschemasqlserversqldb": TaskTypeMigrateSchemaSqlServerSqlDb, + "service.check.oci": TaskTypeServicePointCheckPointOCI, + "service.install.oci": TaskTypeServicePointInstallPointOCI, + "service.upload.oci": TaskTypeServicePointUploadPointOCI, + "validatemigrationinput.sqlserver.azuresqldbmi": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMI, + "validatemigrationinput.sqlserver.azuresqldbmi.sync.lrs": TaskTypeValidateMigrationInputPointSqlServerPointAzureSqlDbMIPointSyncPointLRS, + "validatemigrationinput.sqlserver.sqldb.sync": TaskTypeValidateMigrationInputPointSqlServerPointSqlDbPointSync, + "validate.mongodb": TaskTypeValidatePointMongoDb, + "validate.oracle.azuredbpostgresql.sync": TaskTypeValidatePointOraclePointAzureDbPostgreSqlPointSync, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TaskType(input) + return &out, nil +} + +type UpdateActionType string + +const ( + UpdateActionTypeAddedOnTarget UpdateActionType = "AddedOnTarget" + UpdateActionTypeChangedOnTarget UpdateActionType = "ChangedOnTarget" + UpdateActionTypeDeletedOnTarget UpdateActionType = "DeletedOnTarget" +) + +func PossibleValuesForUpdateActionType() []string { + return []string{ + string(UpdateActionTypeAddedOnTarget), + string(UpdateActionTypeChangedOnTarget), + string(UpdateActionTypeDeletedOnTarget), + } +} + +func (s *UpdateActionType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseUpdateActionType(decoded) + if err != nil { 
+ return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseUpdateActionType(input string) (*UpdateActionType, error) { + vals := map[string]UpdateActionType{ + "addedontarget": UpdateActionTypeAddedOnTarget, + "changedontarget": UpdateActionTypeChangedOnTarget, + "deletedontarget": UpdateActionTypeDeletedOnTarget, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UpdateActionType(input) + return &out, nil +} + +type ValidationStatus string + +const ( + ValidationStatusCompleted ValidationStatus = "Completed" + ValidationStatusCompletedWithIssues ValidationStatus = "CompletedWithIssues" + ValidationStatusDefault ValidationStatus = "Default" + ValidationStatusFailed ValidationStatus = "Failed" + ValidationStatusInProgress ValidationStatus = "InProgress" + ValidationStatusInitialized ValidationStatus = "Initialized" + ValidationStatusNotStarted ValidationStatus = "NotStarted" + ValidationStatusStopped ValidationStatus = "Stopped" +) + +func PossibleValuesForValidationStatus() []string { + return []string{ + string(ValidationStatusCompleted), + string(ValidationStatusCompletedWithIssues), + string(ValidationStatusDefault), + string(ValidationStatusFailed), + string(ValidationStatusInProgress), + string(ValidationStatusInitialized), + string(ValidationStatusNotStarted), + string(ValidationStatusStopped), + } +} + +func (s *ValidationStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseValidationStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseValidationStatus(input string) (*ValidationStatus, error) { + vals := map[string]ValidationStatus{ + "completed": ValidationStatusCompleted, + "completedwithissues": 
ValidationStatusCompletedWithIssues, + "default": ValidationStatusDefault, + "failed": ValidationStatusFailed, + "inprogress": ValidationStatusInProgress, + "initialized": ValidationStatusInitialized, + "notstarted": ValidationStatusNotStarted, + "stopped": ValidationStatusStopped, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ValidationStatus(input) + return &out, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/id_task.go b/resource-manager/datamigration/2025-06-30/taskresource/id_task.go new file mode 100644 index 00000000000..6fb1e1fb21d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/id_task.go @@ -0,0 +1,148 @@ +package taskresource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&TaskId{}) +} + +var _ resourceids.ResourceId = &TaskId{} + +// TaskId is a struct representing the Resource ID for a Task +type TaskId struct { + SubscriptionId string + ResourceGroupName string + ServiceName string + ProjectName string + TaskName string +} + +// NewTaskID returns a new TaskId struct +func NewTaskID(subscriptionId string, resourceGroupName string, serviceName string, projectName string, taskName string) TaskId { + return TaskId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + ServiceName: serviceName, + ProjectName: projectName, + TaskName: taskName, + } +} + +// ParseTaskID parses 'input' into a TaskId +func ParseTaskID(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseTaskIDInsensitively parses 'input' case-insensitively into a TaskId +// note: this method should only be used for API response data and not user input +func ParseTaskIDInsensitively(input string) (*TaskId, error) { + parser := resourceids.NewParserFromResourceIdType(&TaskId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := TaskId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *TaskId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if 
id.ServiceName, ok = input.Parsed["serviceName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "serviceName", input) + } + + if id.ProjectName, ok = input.Parsed["projectName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "projectName", input) + } + + if id.TaskName, ok = input.Parsed["taskName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "taskName", input) + } + + return nil +} + +// ValidateTaskID checks that 'input' can be parsed as a Task ID +func ValidateTaskID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseTaskID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Task ID +func (id TaskId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.DataMigration/services/%s/projects/%s/tasks/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.ServiceName, id.ProjectName, id.TaskName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Task ID +func (id TaskId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.UserSpecifiedSegment("resourceGroupName", "resourceGroupName"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftDataMigration", "Microsoft.DataMigration", "Microsoft.DataMigration"), + resourceids.StaticSegment("staticServices", "services", "services"), + resourceids.UserSpecifiedSegment("serviceName", "serviceName"), + 
resourceids.StaticSegment("staticProjects", "projects", "projects"), + resourceids.UserSpecifiedSegment("projectName", "projectName"), + resourceids.StaticSegment("staticTasks", "tasks", "tasks"), + resourceids.UserSpecifiedSegment("taskName", "taskName"), + } +} + +// String returns a human-readable description of this Task ID +func (id TaskId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Service Name: %q", id.ServiceName), + fmt.Sprintf("Project Name: %q", id.ProjectName), + fmt.Sprintf("Task Name: %q", id.TaskName), + } + return fmt.Sprintf("Task (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/id_task_test.go b/resource-manager/datamigration/2025-06-30/taskresource/id_task_test.go new file mode 100644 index 00000000000..f174a9f0a71 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/id_task_test.go @@ -0,0 +1,372 @@ +package taskresource + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &TaskId{} + +func TestNewTaskID(t *testing.T) { + id := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "resourceGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "resourceGroupName") + } + + if id.ServiceName != "serviceName" { + t.Fatalf("Expected %q but got %q for Segment 'ServiceName'", id.ServiceName, "serviceName") + } + + if id.ProjectName != "projectName" { + t.Fatalf("Expected %q but got %q for Segment 'ProjectName'", id.ProjectName, "projectName") + } + + if id.TaskName != "taskName" { + t.Fatalf("Expected %q but got %q for Segment 'TaskName'", id.TaskName, "taskName") + } +} + +func TestFormatTaskID(t *testing.T) { + actual := NewTaskID("12345678-1234-9876-4563-123456789012", "resourceGroupName", "serviceName", "projectName", "taskName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseTaskID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if actual.TaskName != v.Expected.TaskName { + t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName) + } + + } +} + +func TestParseTaskIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *TaskId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "resourceGroupName", + ServiceName: "serviceName", + ProjectName: "projectName", + TaskName: "taskName", + }, + }, + { + 
// Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resourceGroupName/providers/Microsoft.DataMigration/services/serviceName/projects/projectName/tasks/taskName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE", + Expected: &TaskId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "rEsOuRcEgRoUpNaMe", + ServiceName: "sErViCeNaMe", + ProjectName: "pRoJeCtNaMe", + TaskName: "tAsKnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/rEsOuRcEgRoUpNaMe/pRoViDeRs/mIcRoSoFt.dAtAmIgRaTiOn/sErViCeS/sErViCeNaMe/pRoJeCtS/pRoJeCtNaMe/tAsKs/tAsKnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseTaskIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.ServiceName != v.Expected.ServiceName { + t.Fatalf("Expected %q but got %q for ServiceName", v.Expected.ServiceName, actual.ServiceName) + } + + if actual.ProjectName != v.Expected.ProjectName { + t.Fatalf("Expected %q but got %q for ProjectName", v.Expected.ProjectName, actual.ProjectName) + } + + if 
actual.TaskName != v.Expected.TaskName { + t.Fatalf("Expected %q but got %q for TaskName", v.Expected.TaskName, actual.TaskName) + } + + } +} + +func TestSegmentsForTaskId(t *testing.T) { + segments := TaskId{}.Segments() + if len(segments) == 0 { + t.Fatalf("TaskId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/method_taskscancel.go b/resource-manager/datamigration/2025-06-30/taskresource/method_taskscancel.go new file mode 100644 index 00000000000..3704d4b0672 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/method_taskscancel.go @@ -0,0 +1,54 @@ +package taskresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksCancelOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// TasksCancel ... 
+func (c TaskResourceClient) TasksCancel(ctx context.Context, id TaskId) (result TasksCancelOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/cancel", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/method_taskscommand.go b/resource-manager/datamigration/2025-06-30/taskresource/method_taskscommand.go new file mode 100644 index 00000000000..97138ce09d1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/method_taskscommand.go @@ -0,0 +1,63 @@ +package taskresource + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksCommandOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model CommandProperties +} + +// TasksCommand ... 
+func (c TaskResourceClient) TasksCommand(ctx context.Context, id TaskId, input CommandProperties) (result TasksCommandOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/command", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var respObj json.RawMessage + if err = resp.Unmarshal(&respObj); err != nil { + return + } + model, err := UnmarshalCommandPropertiesImplementation(respObj) + if err != nil { + return + } + result.Model = model + + return +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/method_taskscreateorupdate.go b/resource-manager/datamigration/2025-06-30/taskresource/method_taskscreateorupdate.go new file mode 100644 index 00000000000..ffa9541b884 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/method_taskscreateorupdate.go @@ -0,0 +1,58 @@ +package taskresource + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksCreateOrUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// TasksCreateOrUpdate ... 
+func (c TaskResourceClient) TasksCreateOrUpdate(ctx context.Context, id TaskId, input ProjectTask) (result TasksCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/method_tasksdelete.go b/resource-manager/datamigration/2025-06-30/taskresource/method_tasksdelete.go new file mode 100644 index 00000000000..8605acbd0f9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/method_tasksdelete.go @@ -0,0 +1,77 @@ +package taskresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type TasksDeleteOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData +} + +type TasksDeleteOperationOptions struct { + DeleteRunningTasks *bool +} + +func DefaultTasksDeleteOperationOptions() TasksDeleteOperationOptions { + return TasksDeleteOperationOptions{} +} + +func (o TasksDeleteOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o TasksDeleteOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o TasksDeleteOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.DeleteRunningTasks != nil { + out.Append("deleteRunningTasks", fmt.Sprintf("%v", *o.DeleteRunningTasks)) + } + return &out +} + +// TasksDelete ... +func (c TaskResourceClient) TasksDelete(ctx context.Context, id TaskId, options TasksDeleteOperationOptions) (result TasksDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/method_tasksget.go b/resource-manager/datamigration/2025-06-30/taskresource/method_tasksget.go new file mode 100644 index 00000000000..57ee4118924 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/method_tasksget.go @@ -0,0 +1,83 @@ +package taskresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +type TasksGetOperationOptions struct { + Expand *string +} + +func DefaultTasksGetOperationOptions() TasksGetOperationOptions { + return TasksGetOperationOptions{} +} + +func (o TasksGetOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o TasksGetOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o TasksGetOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Expand != nil { + out.Append("$expand", fmt.Sprintf("%v", *o.Expand)) + } + return &out +} + +// TasksGet ... +func (c TaskResourceClient) TasksGet(ctx context.Context, id TaskId, options TasksGetOperationOptions) (result TasksGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/method_tasksupdate.go b/resource-manager/datamigration/2025-06-30/taskresource/method_tasksupdate.go new file mode 100644 index 00000000000..5c2a5cee6b8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/method_tasksupdate.go @@ -0,0 +1,57 @@ +package taskresource + +import ( + "context" + "net/http" + + 
"github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type TasksUpdateOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ProjectTask +} + +// TasksUpdate ... +func (c TaskResourceClient) TasksUpdate(ctx context.Context, id TaskId, input ProjectTask) (result TasksUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ProjectTask + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_azureactivedirectoryapp.go b/resource-manager/datamigration/2025-06-30/taskresource/model_azureactivedirectoryapp.go new file mode 100644 index 00000000000..3c97d24a681 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_azureactivedirectoryapp.go @@ -0,0 +1,11 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AzureActiveDirectoryApp struct { + AppKey *string `json:"appKey,omitempty"` + ApplicationId *string `json:"applicationId,omitempty"` + IgnoreAzurePermissions *bool `json:"ignoreAzurePermissions,omitempty"` + TenantId *string `json:"tenantId,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_backupfileinfo.go b/resource-manager/datamigration/2025-06-30/taskresource/model_backupfileinfo.go new file mode 100644 index 00000000000..dd2961a166a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_backupfileinfo.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupFileInfo struct { + FamilySequenceNumber *int64 `json:"familySequenceNumber,omitempty"` + FileLocation *string `json:"fileLocation,omitempty"` + Status *BackupFileStatus `json:"status,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_backupsetinfo.go b/resource-manager/datamigration/2025-06-30/taskresource/model_backupsetinfo.go new file mode 100644 index 00000000000..769d00712d6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_backupsetinfo.go @@ -0,0 +1,59 @@ +package taskresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BackupSetInfo struct { + BackupFinishedDate *string `json:"backupFinishedDate,omitempty"` + BackupSetId *string `json:"backupSetId,omitempty"` + BackupStartDate *string `json:"backupStartDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FirstLsn *string `json:"firstLsn,omitempty"` + IsBackupRestored *bool `json:"isBackupRestored,omitempty"` + LastLsn *string `json:"lastLsn,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + ListOfBackupFiles *[]BackupFileInfo `json:"listOfBackupFiles,omitempty"` +} + +func (o *BackupSetInfo) GetBackupFinishedDateAsTime() (*time.Time, error) { + if o.BackupFinishedDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishedDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupFinishedDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishedDate = &formatted +} + +func (o *BackupSetInfo) GetBackupStartDateAsTime() (*time.Time, error) { + if o.BackupStartDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupStartDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetBackupStartDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupStartDate = &formatted +} + +func (o *BackupSetInfo) GetLastModifiedTimeAsTime() (*time.Time, error) { + if o.LastModifiedTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastModifiedTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupSetInfo) SetLastModifiedTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastModifiedTime = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_blobshare.go b/resource-manager/datamigration/2025-06-30/taskresource/model_blobshare.go new file mode 100644 index 00000000000..83c73dc6712 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/taskresource/model_blobshare.go @@ -0,0 +1,8 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BlobShare struct { + SasUri *string `json:"sasUri,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_commandproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_commandproperties.go new file mode 100644 index 00000000000..20ee67ab0e7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_commandproperties.go @@ -0,0 +1,85 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CommandProperties interface { + CommandProperties() BaseCommandPropertiesImpl +} + +var _ CommandProperties = BaseCommandPropertiesImpl{} + +type BaseCommandPropertiesImpl struct { + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s BaseCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s +} + +var _ CommandProperties = RawCommandPropertiesImpl{} + +// RawCommandPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawCommandPropertiesImpl struct { + commandProperties BaseCommandPropertiesImpl + Type string + Values map[string]interface{} +} + +func (s RawCommandPropertiesImpl) CommandProperties() BaseCommandPropertiesImpl { + return s.commandProperties +} + +func UnmarshalCommandPropertiesImplementation(input []byte) (CommandProperties, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling CommandProperties into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["commandType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Migrate.SqlServer.AzureDbSqlMi.Complete") { + var out MigrateMISyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMISyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migrate.Sync.Complete.Database") { + var out MigrateSyncCompleteCommandProperties + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSyncCompleteCommandProperties: %+v", err) + } + return out, nil + } + + var parent BaseCommandPropertiesImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseCommandPropertiesImpl: %+v", err) + } + + return RawCommandPropertiesImpl{ + commandProperties: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connectioninfo.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connectioninfo.go new file mode 100644 index 00000000000..41c28de5b58 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connectioninfo.go @@ -0,0 +1,117 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectionInfo interface { + ConnectionInfo() BaseConnectionInfoImpl +} + +var _ ConnectionInfo = BaseConnectionInfoImpl{} + +type BaseConnectionInfoImpl struct { + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s BaseConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s +} + +var _ ConnectionInfo = RawConnectionInfoImpl{} + +// RawConnectionInfoImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectionInfoImpl struct { + connectionInfo BaseConnectionInfoImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectionInfoImpl) ConnectionInfo() BaseConnectionInfoImpl { + return s.connectionInfo +} + +func UnmarshalConnectionInfoImplementation(input []byte) (ConnectionInfo, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectionInfo into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["type"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MiSqlConnectionInfo") { + var out MiSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MiSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "mongoDbConnectionInfo") { + var out MongoDbConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbConnectionInfo: %+v", err) + } + return out, 
nil + } + + if strings.EqualFold(value, "MySqlConnectionInfo") { + var out MySqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MySqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "OracleConnectionInfo") { + var out OracleConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into OracleConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "PostgreSqlConnectionInfo") { + var out PostgreSqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into PostgreSqlConnectionInfo: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SqlConnectionInfo") { + var out SqlConnectionInfo + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into SqlConnectionInfo: %+v", err) + } + return out, nil + } + + var parent BaseConnectionInfoImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectionInfoImpl: %+v", err) + } + + return RawConnectionInfoImpl{ + connectionInfo: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttomongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttomongodbtaskproperties.go new file mode 100644 index 00000000000..b9a509b04c4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttomongodbtaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToMongoDbTaskProperties{} + +type ConnectToMongoDbTaskProperties struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToMongoDbTaskProperties{} + +func (s ConnectToMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Connect.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToMongoDbTaskProperties{} + +func (s *ConnectToMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbConnectionInfo `json:"input,omitempty"` + Output *[]MongoDbClusterInfo `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType 
`json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcemysqltaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcemysqltaskinput.go new file mode 100644 index 00000000000..aa1829dbaba --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcemysqltaskinput.go @@ -0,0 +1,11 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceMySqlTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetPlatform *MySqlTargetPlatformType `json:"targetPlatform,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcemysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcemysqltaskproperties.go new file mode 100644 index 00000000000..4371ff283ff --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcemysqltaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceMySqlTaskProperties{} + +type ConnectToSourceMySqlTaskProperties struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceMySqlTaskProperties{} + +func (s ConnectToSourceMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper 
ConnectToSourceMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.MySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceMySqlTaskProperties{} + +func (s *ConnectToSourceMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceNonSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return 
fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcenonsqltaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcenonsqltaskoutput.go new file mode 100644 index 00000000000..be439667ade --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcenonsqltaskoutput.go @@ -0,0 +1,12 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceNonSqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerProperties *ServerProperties `json:"serverProperties,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourceoraclesynctaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourceoraclesynctaskinput.go new file mode 100644 index 00000000000..79dc708271c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourceoraclesynctaskinput.go @@ -0,0 +1,8 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceOracleSyncTaskInput struct { + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourceoraclesynctaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourceoraclesynctaskoutput.go new file mode 100644 index 00000000000..21537a2f033 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourceoraclesynctaskoutput.go @@ -0,0 +1,11 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceOracleSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourceoraclesynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourceoraclesynctaskproperties.go new file mode 100644 index 00000000000..06c6da562e4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourceoraclesynctaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToSourceOracleSyncTaskProperties{} + +type ConnectToSourceOracleSyncTaskProperties struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceOracleSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceOracleSyncTaskProperties{} + +func (s ConnectToSourceOracleSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceOracleSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.Oracle.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceOracleSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceOracleSyncTaskProperties{} + +func (s *ConnectToSourceOracleSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceOracleSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceOracleSyncTaskOutput `json:"output,omitempty"` + 
ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceOracleSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceOracleSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcepostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcepostgresqlsynctaskinput.go new file mode 100644 index 00000000000..c7dacb8c34e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcepostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourcePostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcepostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcepostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..f9b74bf26b1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcepostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourcePostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcepostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcepostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..ddcf050c838 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcepostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToSourcePostgreSqlSyncTaskProperties{} + +type ConnectToSourcePostgreSqlSyncTaskProperties struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s ConnectToSourcePostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourcePostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.PostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourcePostgreSqlSyncTaskProperties{} + +func (s *ConnectToSourcePostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourcePostgreSqlSyncTaskInput `json:"input,omitempty"` + Output 
*[]ConnectToSourcePostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourcePostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourcePostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlserversynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlserversynctaskproperties.go new file mode 100644 index 00000000000..6b2dccf3846 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlserversynctaskproperties.go @@ -0,0 +1,121 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerSyncTaskProperties{} + +type ConnectToSourceSqlServerSyncTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerSyncTaskProperties{} + +func (s ConnectToSourceSqlServerSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToSource.SqlServer.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerSyncTaskProperties{} + +func (s *ConnectToSourceSqlServerSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput 
`json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskinput.go 
b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskinput.go new file mode 100644 index 00000000000..6ee51795a0d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskinput.go @@ -0,0 +1,15 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToSourceSqlServerTaskInput struct { + CheckPermissionsGroup *ServerLevelPermissionsGroup `json:"checkPermissionsGroup,omitempty"` + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectDatabases *bool `json:"collectDatabases,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + CollectTdeCertificateInfo *bool `json:"collectTdeCertificateInfo,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutput.go new file mode 100644 index 00000000000..7d0e5b767a9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutput.go @@ -0,0 +1,100 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToSourceSqlServerTaskOutput interface { + ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl +} + +var _ ConnectToSourceSqlServerTaskOutput = BaseConnectToSourceSqlServerTaskOutputImpl{} + +type BaseConnectToSourceSqlServerTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s +} + +var _ ConnectToSourceSqlServerTaskOutput = RawConnectToSourceSqlServerTaskOutputImpl{} + +// RawConnectToSourceSqlServerTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawConnectToSourceSqlServerTaskOutputImpl struct { + connectToSourceSqlServerTaskOutput BaseConnectToSourceSqlServerTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawConnectToSourceSqlServerTaskOutputImpl) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return s.connectToSourceSqlServerTaskOutput +} + +func UnmarshalConnectToSourceSqlServerTaskOutputImplementation(input []byte) (ConnectToSourceSqlServerTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TaskLevelOutput") { + var out ConnectToSourceSqlServerTaskOutputTaskLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + return out, nil + } + + var parent BaseConnectToSourceSqlServerTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseConnectToSourceSqlServerTaskOutputImpl: %+v", err) + } + + return RawConnectToSourceSqlServerTaskOutputImpl{ + connectToSourceSqlServerTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutputagentjoblevel.go new file mode 100644 index 00000000000..f6ff5f641f5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +type ConnectToSourceSqlServerTaskOutputAgentJobLevel struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + JobCategory *string `json:"jobCategory,omitempty"` + JobOwner *string `json:"jobOwner,omitempty"` + LastExecutedOn *string `json:"lastExecutedOn,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputAgentJobLevel{} + +func (s ConnectToSourceSqlServerTaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutputdatabaselevel.go new file mode 100644 index 00000000000..229dc2c28b8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutputdatabaselevel.go @@ -0,0 +1,56 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +type ConnectToSourceSqlServerTaskOutputDatabaseLevel struct { + CompatibilityLevel *DatabaseCompatLevel `json:"compatibilityLevel,omitempty"` + DatabaseFiles *[]DatabaseFileInfo `json:"databaseFiles,omitempty"` + DatabaseState *DatabaseState `json:"databaseState,omitempty"` + Name *string `json:"name,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputDatabaseLevel{} + +func (s ConnectToSourceSqlServerTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + 
decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutputloginlevel.go new file mode 100644 index 00000000000..bc84d82533b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutputloginlevel.go @@ -0,0 +1,56 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +type ConnectToSourceSqlServerTaskOutputLoginLevel struct { + DefaultDatabase *string `json:"defaultDatabase,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + LoginType *LoginType `json:"loginType,omitempty"` + MigrationEligibility *MigrationEligibilityInfo `json:"migrationEligibility,omitempty"` + Name *string `json:"name,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputLoginLevel{} + +func (s ConnectToSourceSqlServerTaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputLoginLevel + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutputtasklevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutputtasklevel.go new file mode 100644 index 00000000000..302653c00f1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskoutputtasklevel.go @@ -0,0 +1,58 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ConnectToSourceSqlServerTaskOutput = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +type ConnectToSourceSqlServerTaskOutputTaskLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + DatabaseTdeCertificateMapping *map[string]string `json:"databaseTdeCertificateMapping,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` + + // Fields inherited from ConnectToSourceSqlServerTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) ConnectToSourceSqlServerTaskOutput() BaseConnectToSourceSqlServerTaskOutputImpl { + return BaseConnectToSourceSqlServerTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskOutputTaskLevel{} + +func (s ConnectToSourceSqlServerTaskOutputTaskLevel) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskOutputTaskLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + decoded["resultType"] = "TaskLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskOutputTaskLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskproperties.go 
b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskproperties.go new file mode 100644 index 00000000000..fc72f0a093b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttosourcesqlservertaskproperties.go @@ -0,0 +1,124 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToSourceSqlServerTaskProperties{} + +type ConnectToSourceSqlServerTaskProperties struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + Output *[]ConnectToSourceSqlServerTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToSourceSqlServerTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToSourceSqlServerTaskProperties{} + +func (s ConnectToSourceSqlServerTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToSourceSqlServerTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + 
decoded["taskType"] = "ConnectToSource.SqlServer" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToSourceSqlServerTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToSourceSqlServerTaskProperties{} + +func (s *ConnectToSourceSqlServerTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToSourceSqlServerTaskInput `json:"input,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToSourceSqlServerTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output 
:= make([]ConnectToSourceSqlServerTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalConnectToSourceSqlServerTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'ConnectToSourceSqlServerTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbformysqltaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbformysqltaskinput.go new file mode 100644 index 00000000000..db19af7fce4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbformysqltaskinput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForMySqlTaskInput struct { + IsOfflineMigration *bool `json:"isOfflineMigration,omitempty"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbformysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbformysqltaskoutput.go new file mode 100644 index 00000000000..89c026eaed0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbformysqltaskoutput.go @@ -0,0 +1,12 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForMySqlTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbformysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbformysqltaskproperties.go new file mode 100644 index 00000000000..39522b0bbfe --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbformysqltaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetAzureDbForMySqlTaskProperties{} + +type ConnectToTargetAzureDbForMySqlTaskProperties struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s ConnectToTargetAzureDbForMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForMySqlTaskProperties{} + +func (s *ConnectToTargetAzureDbForMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForMySqlTaskInput `json:"input,omitempty"` + 
Output *[]ConnectToTargetAzureDbForMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..5cf86899c3a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetAzureDbForPostgreSqlSyncTaskInput struct { + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..e9da7d7d83f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput struct { + Databases *[]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..471b1930fcc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..b4ef727339d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,8 @@ 
+package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput struct { + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..105a63bcf67 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,12 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput struct { + DatabaseSchemaMap *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined `json:"databaseSchemaMap,omitempty"` + Databases *[]string `json:"databases,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go new file mode 100644 index 00000000000..4bed5b09d17 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskoutputdatabaseschemamapinlined.go @@ 
-0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutputDatabaseSchemaMapInlined struct { + Database *string `json:"database,omitempty"` + Schemas *[]string `json:"schemas,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..128d745ab8f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties{} + 
+func (s *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqldbtaskinput.go new file mode 100644 index 00000000000..bd7ce1f745c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqldbtaskinput.go @@ 
-0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlDbTaskInput struct { + QueryObjectCounts *bool `json:"queryObjectCounts,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqldbtaskoutput.go new file mode 100644 index 00000000000..e020cbc4e0e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqldbtaskoutput.go @@ -0,0 +1,11 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlDbTaskOutput struct { + Databases *map[string]string `json:"databases,omitempty"` + Id *string `json:"id,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqldbtaskproperties.go new file mode 100644 index 00000000000..223d90fb30e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqldbtaskproperties.go @@ -0,0 +1,109 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetSqlDbTaskProperties{} + +type ConnectToTargetSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlDbTaskProperties{} + +func (s ConnectToTargetSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlDbTaskProperties{} + +func (s *ConnectToTargetSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *ConnectToTargetSqlDbTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput 
`json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmisynctaskinput.go new file mode 100644 index 00000000000..20851a5ebbf --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmisynctaskinput.go @@ -0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmisynctaskoutput.go new file mode 100644 index 00000000000..9d6041e0dd6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlMISyncTaskOutput struct { + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmisynctaskproperties.go new file mode 100644 index 00000000000..dd968d7e50d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmisynctaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetSqlMISyncTaskProperties{} + +type ConnectToTargetSqlMISyncTaskProperties struct { + Input *ConnectToTargetSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlMISyncTaskProperties{} + +func (s ConnectToTargetSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlMISyncTaskProperties{} + +func (s *ConnectToTargetSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData 
*map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmitaskinput.go new file mode 100644 index 00000000000..29534f919ab --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmitaskinput.go @@ -0,0 +1,11 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlMITaskInput struct { + CollectAgentJobs *bool `json:"collectAgentJobs,omitempty"` + CollectLogins *bool `json:"collectLogins,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidateSsisCatalogOnly *bool `json:"validateSsisCatalogOnly,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmitaskoutput.go new file mode 100644 index 00000000000..140caa16e24 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmitaskoutput.go @@ -0,0 +1,13 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ConnectToTargetSqlMITaskOutput struct { + AgentJobs *[]string `json:"agentJobs,omitempty"` + Id *string `json:"id,omitempty"` + Logins *[]string `json:"logins,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmitaskproperties.go new file mode 100644 index 00000000000..0f6ebd7ed28 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlmitaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ConnectToTargetSqlMITaskProperties{} + +type ConnectToTargetSqlMITaskProperties struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlMITaskProperties{} + +func (s ConnectToTargetSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlMITaskProperties{} + +func (s *ConnectToTargetSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlMITaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ConnectToTargetSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlsqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlsqldbsynctaskinput.go new file mode 100644 index 00000000000..230653df71a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlsqldbsynctaskinput.go @@ -0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ConnectToTargetSqlSqlDbSyncTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlsqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlsqldbsynctaskproperties.go new file mode 100644 index 00000000000..151f1d7b7f9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_connecttotargetsqlsqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +type ConnectToTargetSqlSqlDbSyncTaskProperties struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s ConnectToTargetSqlSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ConnectToTargetSqlSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + 
return nil, fmt.Errorf("marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ConnectToTarget.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ConnectToTargetSqlSqlDbSyncTaskProperties{} + +func (s *ConnectToTargetSqlSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ConnectToTargetSqlSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]ConnectToTargetSqlDbTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ConnectToTargetSqlSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 
'ConnectToTargetSqlSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_databasebackupinfo.go b/resource-manager/datamigration/2025-06-30/taskresource/model_databasebackupinfo.go new file mode 100644 index 00000000000..3f35fc9b760 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_databasebackupinfo.go @@ -0,0 +1,33 @@ +package taskresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseBackupInfo struct { + BackupFiles *[]string `json:"backupFiles,omitempty"` + BackupFinishDate *string `json:"backupFinishDate,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FamilyCount *int64 `json:"familyCount,omitempty"` + IsCompressed *bool `json:"isCompressed,omitempty"` + IsDamaged *bool `json:"isDamaged,omitempty"` + Position *int64 `json:"position,omitempty"` +} + +func (o *DatabaseBackupInfo) GetBackupFinishDateAsTime() (*time.Time, error) { + if o.BackupFinishDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.BackupFinishDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseBackupInfo) SetBackupFinishDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.BackupFinishDate = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_databasefileinfo.go b/resource-manager/datamigration/2025-06-30/taskresource/model_databasefileinfo.go new file mode 100644 index 00000000000..b985e511a0d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_databasefileinfo.go @@ -0,0 +1,14 @@ +package taskresource + +// Copyright 
(c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DatabaseFileInfo struct { + DatabaseName *string `json:"databaseName,omitempty"` + FileType *DatabaseFileType `json:"fileType,omitempty"` + Id *string `json:"id,omitempty"` + LogicalName *string `json:"logicalName,omitempty"` + PhysicalFullName *string `json:"physicalFullName,omitempty"` + RestoreFullName *string `json:"restoreFullName,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_databasesummaryresult.go b/resource-manager/datamigration/2025-06-30/taskresource/model_databasesummaryresult.go new file mode 100644 index 00000000000..d5e2bf5eda0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_databasesummaryresult.go @@ -0,0 +1,47 @@ +package taskresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_databasetable.go b/resource-manager/datamigration/2025-06-30/taskresource/model_databasetable.go new file mode 100644 index 00000000000..259781e85a2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_databasetable.go @@ -0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DatabaseTable struct { + HasRows *bool `json:"hasRows,omitempty"` + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_dataintegrityvalidationresult.go b/resource-manager/datamigration/2025-06-30/taskresource/model_dataintegrityvalidationresult.go new file mode 100644 index 00000000000..3a6ec62068b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_dataintegrityvalidationresult.go @@ -0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DataIntegrityValidationResult struct { + FailedObjects *map[string]string `json:"failedObjects,omitempty"` + ValidationErrors *ValidationError `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_dataitemmigrationsummaryresult.go b/resource-manager/datamigration/2025-06-30/taskresource/model_dataitemmigrationsummaryresult.go new file mode 100644 index 00000000000..59f61665ce0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_dataitemmigrationsummaryresult.go @@ -0,0 +1,46 @@ +package taskresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type DataItemMigrationSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + Name *string `json:"name,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` +} + +func (o *DataItemMigrationSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *DataItemMigrationSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *DataItemMigrationSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_executionstatistics.go b/resource-manager/datamigration/2025-06-30/taskresource/model_executionstatistics.go new file mode 100644 index 00000000000..3cba1907533 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_executionstatistics.go @@ -0,0 +1,13 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ExecutionStatistics struct { + CpuTimeMs *float64 `json:"cpuTimeMs,omitempty"` + ElapsedTimeMs *float64 `json:"elapsedTimeMs,omitempty"` + ExecutionCount *int64 `json:"executionCount,omitempty"` + HasErrors *bool `json:"hasErrors,omitempty"` + SqlErrors *[]string `json:"sqlErrors,omitempty"` + WaitStats *map[string]WaitStatistics `json:"waitStats,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_fileshare.go b/resource-manager/datamigration/2025-06-30/taskresource/model_fileshare.go new file mode 100644 index 00000000000..60e2ee8d55e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_fileshare.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FileShare struct { + Password *string `json:"password,omitempty"` + Path string `json:"path"` + UserName *string `json:"userName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_gettdecertificatessqltaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_gettdecertificatessqltaskinput.go new file mode 100644 index 00000000000..eeb90ec4fd2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_gettdecertificatessqltaskinput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetTdeCertificatesSqlTaskInput struct { + BackupFileShare FileShare `json:"backupFileShare"` + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + SelectedCertificates []SelectedCertificateInput `json:"selectedCertificates"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_gettdecertificatessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_gettdecertificatessqltaskoutput.go new file mode 100644 index 00000000000..f73d40720a7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_gettdecertificatessqltaskoutput.go @@ -0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetTdeCertificatesSqlTaskOutput struct { + Base64EncodedCertificates *map[string][]string `json:"base64EncodedCertificates,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_gettdecertificatessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_gettdecertificatessqltaskproperties.go new file mode 100644 index 00000000000..137a63a1890 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_gettdecertificatessqltaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetTdeCertificatesSqlTaskProperties{} + +type GetTdeCertificatesSqlTaskProperties struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetTdeCertificatesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetTdeCertificatesSqlTaskProperties{} + +func (s GetTdeCertificatesSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetTdeCertificatesSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetTDECertificates.Sql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetTdeCertificatesSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetTdeCertificatesSqlTaskProperties{} + +func (s *GetTdeCertificatesSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetTdeCertificatesSqlTaskInput `json:"input,omitempty"` + Output *[]GetTdeCertificatesSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetTdeCertificatesSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetTdeCertificatesSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesmysqltaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesmysqltaskinput.go new file mode 100644 index 00000000000..9e69c16cdfb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesmysqltaskinput.go @@ -0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesMySqlTaskInput struct { + ConnectionInfo MySqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesmysqltaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesmysqltaskoutput.go new file mode 100644 index 00000000000..398a1d3936a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesmysqltaskoutput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesMySqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesmysqltaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesmysqltaskproperties.go new file mode 100644 index 00000000000..fbf1e5cd34c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesmysqltaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesMySqlTaskProperties{} + +type GetUserTablesMySqlTaskProperties struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesMySqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesMySqlTaskProperties{} + +func (s GetUserTablesMySqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesMySqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesMySqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesMySqlTaskProperties{} + +func (s *GetUserTablesMySqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesMySqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesMySqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State 
*TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesMySqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesMySqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesoracletaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesoracletaskinput.go new file mode 100644 index 00000000000..c5bad92b148 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesoracletaskinput.go @@ -0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesOracleTaskInput struct { + ConnectionInfo OracleConnectionInfo `json:"connectionInfo"` + SelectedSchemas []string `json:"selectedSchemas"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesoracletaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesoracletaskoutput.go new file mode 100644 index 00000000000..2539c1f582c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesoracletaskoutput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesOracleTaskOutput struct { + SchemaName *string `json:"schemaName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesoracletaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesoracletaskproperties.go new file mode 100644 index 00000000000..afc94bfd29d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablesoracletaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesOracleTaskProperties{} + +type GetUserTablesOracleTaskProperties struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesOracleTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesOracleTaskProperties{} + +func (s GetUserTablesOracleTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesOracleTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesOracle" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesOracleTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesOracleTaskProperties{} + +func (s *GetUserTablesOracleTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesOracleTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesOracleTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError 
`json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesOracleTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesOracleTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablespostgresqltaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablespostgresqltaskinput.go new file mode 100644 index 00000000000..dba8485eeb8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablespostgresqltaskinput.go @@ -0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesPostgreSqlTaskInput struct { + ConnectionInfo PostgreSqlConnectionInfo `json:"connectionInfo"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablespostgresqltaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablespostgresqltaskoutput.go new file mode 100644 index 00000000000..4f04507c9e7 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablespostgresqltaskoutput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesPostgreSqlTaskOutput struct { + DatabaseName *string `json:"databaseName,omitempty"` + Tables *[]DatabaseTable `json:"tables,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablespostgresqltaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablespostgresqltaskproperties.go new file mode 100644 index 00000000000..87bdbe6086e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablespostgresqltaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesPostgreSqlTaskProperties{} + +type GetUserTablesPostgreSqlTaskProperties struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesPostgreSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesPostgreSqlTaskProperties{} + +func (s GetUserTablesPostgreSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesPostgreSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTablesPostgreSql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesPostgreSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesPostgreSqlTaskProperties{} + +func (s *GetUserTablesPostgreSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesPostgreSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesPostgreSqlTaskOutput `json:"output,omitempty"` + ClientData *map[string]string 
`json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesPostgreSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesPostgreSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqlsynctaskinput.go new file mode 100644 index 00000000000..9770c7268d0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqlsynctaskinput.go @@ -0,0 +1,11 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlSyncTaskInput struct { + SelectedSourceDatabases []string `json:"selectedSourceDatabases"` + SelectedTargetDatabases []string `json:"selectedTargetDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqlsynctaskoutput.go new file mode 100644 index 00000000000..652b046beed --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqlsynctaskoutput.go @@ -0,0 +1,11 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlSyncTaskOutput struct { + DatabasesToSourceTables *map[string][]DatabaseTable `json:"databasesToSourceTables,omitempty"` + DatabasesToTargetTables *map[string][]DatabaseTable `json:"databasesToTargetTables,omitempty"` + TableValidationErrors *map[string][]string `json:"tableValidationErrors,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqlsynctaskproperties.go new file mode 100644 index 00000000000..5287c1148b3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlSyncTaskProperties{} + +type GetUserTablesSqlSyncTaskProperties struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlSyncTaskProperties{} + +func (s GetUserTablesSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlSyncTaskProperties{} + +func (s *GetUserTablesSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlSyncTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqltaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqltaskinput.go new file mode 100644 index 00000000000..57eb93c5fd2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqltaskinput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetUserTablesSqlTaskInput struct { + ConnectionInfo SqlConnectionInfo `json:"connectionInfo"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []string `json:"selectedDatabases"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqltaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqltaskoutput.go new file mode 100644 index 00000000000..d36a8077b19 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqltaskoutput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetUserTablesSqlTaskOutput struct { + DatabasesToTables *map[string][]DatabaseTable `json:"databasesToTables,omitempty"` + Id *string `json:"id,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqltaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqltaskproperties.go new file mode 100644 index 00000000000..864d353304e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_getusertablessqltaskproperties.go @@ -0,0 +1,109 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = GetUserTablesSqlTaskProperties{} + +type GetUserTablesSqlTaskProperties struct { + Input *GetUserTablesSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s GetUserTablesSqlTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = GetUserTablesSqlTaskProperties{} + +func (s GetUserTablesSqlTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper GetUserTablesSqlTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + decoded["taskType"] = "GetUserTables.Sql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling GetUserTablesSqlTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &GetUserTablesSqlTaskProperties{} + +func (s *GetUserTablesSqlTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *GetUserTablesSqlTaskInput `json:"input,omitempty"` + Output *[]GetUserTablesSqlTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + 
Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling GetUserTablesSqlTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'GetUserTablesSqlTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemisynccompletecommandinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemisynccompletecommandinput.go new file mode 100644 index 00000000000..fe3bfa24c91 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemisynccompletecommandinput.go @@ -0,0 +1,8 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMISyncCompleteCommandInput struct { + SourceDatabaseName string `json:"sourceDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemisynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemisynccompletecommandoutput.go new file mode 100644 index 00000000000..fc9f0f1102e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemisynccompletecommandoutput.go @@ -0,0 +1,8 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMISyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemisynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemisynccompletecommandproperties.go new file mode 100644 index 00000000000..831950bcaa3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemisynccompletecommandproperties.go @@ -0,0 +1,55 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ CommandProperties = MigrateMISyncCompleteCommandProperties{} + +type MigrateMISyncCompleteCommandProperties struct { + Input *MigrateMISyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateMISyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateMISyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateMISyncCompleteCommandProperties{} + +func (s MigrateMISyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMISyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.SqlServer.AzureDbSqlMi.Complete" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMISyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemongodbtaskproperties.go new file mode 100644 index 00000000000..6751409711e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemongodbtaskproperties.go @@ -0,0 +1,121 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMongoDbTaskProperties{} + +type MigrateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMongoDbTaskProperties{} + +func (s MigrateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMongoDbTaskProperties{} + +func (s *MigrateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState 
`json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MongoDbProgress, 0) + for i, val := range listTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinedatabaseinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinedatabaseinput.go new file mode 100644 index 00000000000..42db4478562 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinedatabaseinput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlOfflineDatabaseInput struct { + Name *string `json:"name,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskinput.go new file mode 100644 index 00000000000..3d2570fe8ba --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskinput.go @@ -0,0 +1,32 @@ +package taskresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + MakeSourceServerReadOnly *bool `json:"makeSourceServerReadOnly,omitempty"` + OptionalAgentSettings *map[string]string `json:"optionalAgentSettings,omitempty"` + SelectedDatabases []MigrateMySqlAzureDbForMySqlOfflineDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateMySqlAzureDbForMySqlOfflineTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutput.go new file mode 100644 index 00000000000..766193628dc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutput.go @@ -0,0 +1,100 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlOfflineTaskOutput interface { + MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlOfflineTaskOutput BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlOfflineTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlOfflineTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + 
if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + migrateMySqlAzureDbForMySqlOfflineTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go new file mode 100644 index 00000000000..850a7decc4a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + var decoded 
map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go new file mode 100644 index 00000000000..792b56e2161 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutputerror.go @@ -0,0 +1,52 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go new file mode 100644 index 00000000000..20132b4c6b8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package 
taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel) 
MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go new file mode 100644 index 00000000000..9660660e939 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskoutputtablelevel.go @@ -0,0 +1,61 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlOfflineTaskOutput = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + LastStorageUpdate *string `json:"lastStorageUpdate,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlOfflineTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlOfflineTaskOutput() BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlOfflineTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling 
MigrateMySqlAzureDbForMySqlOfflineTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskproperties.go new file mode 100644 index 00000000000..fce82dab630 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlofflinetaskproperties.go @@ -0,0 +1,127 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +type MigrateMySqlAzureDbForMySqlOfflineTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlOfflineTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlOfflineTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlOfflineTaskProperties + 
wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlOfflineTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlOfflineTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlOfflineTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlOfflineTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := 
make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlOfflineTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlOfflineTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlOfflineTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsyncdatabaseinput.go new file mode 100644 index 00000000000..c151f7b6289 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsyncdatabaseinput.go @@ -0,0 +1,13 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlSyncDatabaseInput struct { + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskinput.go new file mode 100644 index 00000000000..a3111b43fe3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskinput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateMySqlAzureDbForMySqlSyncTaskInput struct { + SelectedDatabases []MigrateMySqlAzureDbForMySqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo MySqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo MySqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutput.go new file mode 100644 index 00000000000..1377dcd66e6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateMySqlAzureDbForMySqlSyncTaskOutput interface { + MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +type BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s +} + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{} + +// RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl struct { + migrateMySqlAzureDbForMySqlSyncTaskOutput BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return s.migrateMySqlAzureDbForMySqlSyncTaskOutput +} + +func UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(input []byte) (MigrateMySqlAzureDbForMySqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err 
!= nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + migrateMySqlAzureDbForMySqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..799a94fd78f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..319318bd52b --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel{} + 
+func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputerror.go new file mode 100644 index 00000000000..561f9774d8f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputError{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..dc87b8e2103 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ +package taskresource + +import ( + "encoding/json" + "fmt" 
+) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return 
encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..9b51cf03efb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateMySqlAzureDbForMySqlSyncTaskOutput = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +type MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *string `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *string `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *string `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateMySqlAzureDbForMySqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MigrateMySqlAzureDbForMySqlSyncTaskOutput() BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl { + return 
BaseMigrateMySqlAzureDbForMySqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskproperties.go new file mode 100644 index 00000000000..e9e2e82ca0e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratemysqlazuredbformysqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +type MigrateMySqlAzureDbForMySqlSyncTaskProperties struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateMySqlAzureDbForMySqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s MigrateMySqlAzureDbForMySqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateMySqlAzureDbForMySqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.MySql.AzureDbForMySql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateMySqlAzureDbForMySqlSyncTaskProperties{} + +func (s *MigrateMySqlAzureDbForMySqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateMySqlAzureDbForMySqlSyncTaskInput 
`json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateMySqlAzureDbForMySqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateMySqlAzureDbForMySqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateMySqlAzureDbForMySqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateMySqlAzureDbForMySqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..10f26564f6c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,121 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type MigrateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + 
} + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Oracle.AzureDbForPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigrateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = 
&output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateOracleAzureDbPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..0078214f52f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateOracleAzureDbPostgreSqlSyncDatabaseInput struct { + CaseManipulation *string `json:"caseManipulation,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..ad20a079e35 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskinput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateOracleAzureDbPostgreSqlSyncTaskInput struct { + SelectedDatabases []MigrateOracleAzureDbPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo OracleConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..63ebbfd06cd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutput interface { + MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +type BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{} + +// RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. 
this cannot be used as a Request Payload). +type RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl struct { + migrateOracleAzureDbPostgreSqlSyncTaskOutput BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return s.migrateOracleAzureDbPostgreSqlSyncTaskOutput +} + +func UnmarshalMigrateOracleAzureDbPostgreSqlSyncTaskOutputImplementation(input []byte) (MigrateOracleAzureDbPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + migrateOracleAzureDbPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..6a0767fa7f3 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..ed8830c3928 --- /dev/null 
+++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..14538d30285 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputError{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..784c9569482 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,57 @@ +package 
taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, 
fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..4398264c13e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migrateoracleazuredbpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateOracleAzureDbPostgreSqlSyncTaskOutput = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +type MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateOracleAzureDbPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s 
MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MigrateOracleAzureDbPostgreSqlSyncTaskOutput() BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl { + return BaseMigrateOracleAzureDbPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateOracleAzureDbPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go new file mode 100644 index 00000000000..b6e486b70ea --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabaseinput.go @@ -0,0 +1,14 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]interface{} `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SelectedTables *[]MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput `json:"selectedTables,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go new file mode 100644 index 00000000000..cef81697291 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsyncdatabasetableinput.go @@ -0,0 +1,8 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseTableInput struct { + Name *string `json:"name,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go new file mode 100644 index 00000000000..f08384cb749 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskinput.go @@ -0,0 +1,30 @@ +package taskresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigratePostgreSqlAzureDbForPostgreSqlSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo PostgreSqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo PostgreSqlConnectionInfo `json:"targetConnectionInfo"` +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..55e990fb668 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutput.go @@ -0,0 +1,108 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput interface { + MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +type BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s +} + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{} + +// RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl struct { + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return s.migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput +} + +func UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(input []byte) (MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if 
strings.EqualFold(value, "MigrationLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl: %+v", err) + } + + return RawMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + migratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..ff0b0349ffc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..53758f4eeb5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) 
MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go new file mode 100644 index 00000000000..7dc9e2b64f0 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputerror.go @@ -0,0 +1,53 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go new file mode 100644 index 
00000000000..294e350aa70 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel struct { + DatabaseCount *float64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerType *ScenarioSource `json:"sourceServerType,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *ReplicateMigrationState `json:"state,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerType *ScenarioTarget `json:"targetServerType,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel + wrapped := 
wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..bd37564cdf1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput() BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl { + return BaseMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..e336160dd38 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratepostgresqlazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,130 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +type MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, 
nil +} + +var _ json.Unmarshaler = &MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigratePostgreSqlAzureDbForPostgreSqlSyncTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list 
[]json.RawMessage: %+v", err) + } + + output := make([]MigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigratePostgreSqlAzureDbForPostgreSqlSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbdatabaseinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbdatabaseinput.go new file mode 100644 index 00000000000..158da6ea071 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbdatabaseinput.go @@ -0,0 +1,13 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbDatabaseInput struct { + Id *string `json:"id,omitempty"` + MakeSourceDbReadOnly *bool `json:"makeSourceDbReadOnly,omitempty"` + Name *string `json:"name,omitempty"` + SchemaSetting *interface{} `json:"schemaSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsyncdatabaseinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsyncdatabaseinput.go new file mode 100644 index 00000000000..8e64d5310e2 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsyncdatabaseinput.go @@ -0,0 +1,15 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncDatabaseInput struct { + Id *string `json:"id,omitempty"` + MigrationSetting *map[string]string `json:"migrationSetting,omitempty"` + Name *string `json:"name,omitempty"` + SchemaName *string `json:"schemaName,omitempty"` + SourceSetting *map[string]string `json:"sourceSetting,omitempty"` + TableMap *map[string]string `json:"tableMap,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + TargetSetting *map[string]string `json:"targetSetting,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskinput.go new file mode 100644 index 00000000000..591a42c028f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskinput.go @@ -0,0 +1,11 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbSyncTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutput.go new file mode 100644 index 00000000000..de0109fc2ad --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutput.go @@ -0,0 +1,108 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbSyncTaskOutput interface { + MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbSyncTaskOutput = RawMigrateSqlServerSqlDbSyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbSyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlDbSyncTaskOutputImpl struct { + migrateSqlServerSqlDbSyncTaskOutput BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbSyncTaskOutputImpl) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return s.migrateSqlServerSqlDbSyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbSyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into 
MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbSyncTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbSyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbSyncTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbSyncTaskOutputImpl{ + migrateSqlServerSqlDbSyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go new file mode 100644 index 00000000000..654f3fac269 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputdatabaseerror.go @@ -0,0 +1,53 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseError struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Events *[]SyncMigrationDatabaseErrorEvent `json:"events,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..1106242db55 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputdatabaselevel.go @@ -0,0 +1,66 @@ +package 
taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel struct { + AppliedChanges *int64 `json:"appliedChanges,omitempty"` + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + FullLoadCompletedTables *int64 `json:"fullLoadCompletedTables,omitempty"` + FullLoadErroredTables *int64 `json:"fullLoadErroredTables,omitempty"` + FullLoadLoadingTables *int64 `json:"fullLoadLoadingTables,omitempty"` + FullLoadQueuedTables *int64 `json:"fullLoadQueuedTables,omitempty"` + IncomingChanges *int64 `json:"incomingChanges,omitempty"` + InitializationCompleted *bool `json:"initializationCompleted,omitempty"` + Latency *int64 `json:"latency,omitempty"` + MigrationState *SyncDatabaseMigrationReportingState `json:"migrationState,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputerror.go new file mode 100644 index 00000000000..ac5bc1957de --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputerror.go @@ -0,0 +1,52 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputError{} + +type MigrateSqlServerSqlDbSyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputError{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..61ca4f54952 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputmigrationlevel.go @@ -0,0 +1,58 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServer *string `json:"sourceServer,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + TargetServer *string `json:"targetServer,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git 
a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputtablelevel.go new file mode 100644 index 00000000000..f2150dc4cd4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskoutputtablelevel.go @@ -0,0 +1,64 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbSyncTaskOutput = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbSyncTaskOutputTableLevel struct { + CdcDeleteCounter *int64 `json:"cdcDeleteCounter,omitempty"` + CdcInsertCounter *int64 `json:"cdcInsertCounter,omitempty"` + CdcUpdateCounter *int64 `json:"cdcUpdateCounter,omitempty"` + DataErrorsCounter *int64 `json:"dataErrorsCounter,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + FullLoadEndedOn *string `json:"fullLoadEndedOn,omitempty"` + FullLoadEstFinishTime *string `json:"fullLoadEstFinishTime,omitempty"` + FullLoadStartedOn *string `json:"fullLoadStartedOn,omitempty"` + FullLoadTotalRows *int64 `json:"fullLoadTotalRows,omitempty"` + LastModifiedTime *string `json:"lastModifiedTime,omitempty"` + State *SyncTableMigrationState `json:"state,omitempty"` + TableName *string `json:"tableName,omitempty"` + TotalChangesApplied *int64 `json:"totalChangesApplied,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbSyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MigrateSqlServerSqlDbSyncTaskOutput() BaseMigrateSqlServerSqlDbSyncTaskOutputImpl { + return BaseMigrateSqlServerSqlDbSyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + 
+var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbSyncTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..133b526e65c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbsynctaskproperties.go @@ -0,0 +1,121 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = MigrateSqlServerSqlDbSyncTaskProperties{} + +type MigrateSqlServerSqlDbSyncTaskProperties struct { + Input *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlDbSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbSyncTaskProperties{} + +func (s MigrateSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbSyncTaskProperties{} + +func (s *MigrateSqlServerSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateSqlServerSqlDbSyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbSyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbSyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskinput.go new file mode 100644 index 
00000000000..17f6c6b2d92 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskinput.go @@ -0,0 +1,13 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlDbTaskInput struct { + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedDatabases []MigrateSqlServerSqlDbDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` + ValidationOptions *MigrationValidationOptions `json:"validationOptions,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutput.go new file mode 100644 index 00000000000..4efbc1bea0b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutput.go @@ -0,0 +1,116 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlDbTaskOutput interface { + MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl +} + +var _ MigrateSqlServerSqlDbTaskOutput = BaseMigrateSqlServerSqlDbTaskOutputImpl{} + +type BaseMigrateSqlServerSqlDbTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlDbTaskOutput = RawMigrateSqlServerSqlDbTaskOutputImpl{} + +// RawMigrateSqlServerSqlDbTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlDbTaskOutputImpl struct { + migrateSqlServerSqlDbTaskOutput BaseMigrateSqlServerSqlDbTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlDbTaskOutputImpl) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return s.migrateSqlServerSqlDbTaskOutput +} + +func UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(input []byte) (MigrateSqlServerSqlDbTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "MigrationDatabaseLevelValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlDbTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "TableLevelOutput") { + var out MigrateSqlServerSqlDbTaskOutputTableLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationValidationOutput") { + var out MigrateSqlServerSqlDbTaskOutputValidationResult + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlDbTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlDbTaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlDbTaskOutputImpl{ + migrateSqlServerSqlDbTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputdatabaselevel.go new file mode 100644 index 00000000000..27797a34504 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputdatabaselevel.go @@ -0,0 +1,65 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorCount *int64 `json:"errorCount,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + NumberOfObjects *int64 `json:"numberOfObjects,omitempty"` + NumberOfObjectsCompleted *int64 `json:"numberOfObjectsCompleted,omitempty"` + ObjectSummary *map[string]DataItemMigrationSummaryResult `json:"objectSummary,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevel{} + +func (s 
MigrateSqlServerSqlDbTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go new file mode 100644 index 00000000000..8efa791fc51 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputdatabaselevelvalidationresult.go @@ -0,0 +1,60 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult struct { + DataIntegrityValidationResult *DataIntegrityValidationResult `json:"dataIntegrityValidationResult,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + QueryAnalysisValidationResult *QueryAnalysisValidationResult `json:"queryAnalysisValidationResult,omitempty"` + SchemaValidationResult *SchemaComparisonValidationResult `json:"schemaValidationResult,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationDatabaseLevelValidationOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputDatabaseLevelValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputerror.go new file mode 100644 index 00000000000..14c189addbd --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputerror.go @@ -0,0 +1,52 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputError{} + +type MigrateSqlServerSqlDbTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputError) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputError{} + +func (s MigrateSqlServerSqlDbTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputmigrationlevel.go new file mode 100644 index 00000000000..a66ff2d88ff --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlDbTaskOutputMigrationLevel struct { + DatabaseSummary *map[string]DatabaseSummaryResult `json:"databaseSummary,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + DurationInSeconds *int64 `json:"durationInSeconds,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + MigrationReportResult *MigrationReportResult `json:"migrationReportResult,omitempty"` + MigrationValidationResult *MigrationValidationResult `json:"migrationValidationResult,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string 
`json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputtablelevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputtablelevel.go new file mode 100644 index 00000000000..f539775aa39 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputtablelevel.go @@ -0,0 +1,60 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +type MigrateSqlServerSqlDbTaskOutputTableLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ItemsCompletedCount *int64 `json:"itemsCompletedCount,omitempty"` + ItemsCount *int64 `json:"itemsCount,omitempty"` + ObjectName *string `json:"objectName,omitempty"` + ResultPrefix *string `json:"resultPrefix,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + StatusMessage *string `json:"statusMessage,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputTableLevel{} + +func (s MigrateSqlServerSqlDbTaskOutputTableLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputTableLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + decoded["resultType"] = "TableLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputTableLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputvalidationresult.go 
b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputvalidationresult.go new file mode 100644 index 00000000000..87ca3e60cdc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskoutputvalidationresult.go @@ -0,0 +1,54 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlDbTaskOutput = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +type MigrateSqlServerSqlDbTaskOutputValidationResult struct { + MigrationId *string `json:"migrationId,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"` + + // Fields inherited from MigrateSqlServerSqlDbTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MigrateSqlServerSqlDbTaskOutput() BaseMigrateSqlServerSqlDbTaskOutputImpl { + return BaseMigrateSqlServerSqlDbTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskOutputValidationResult{} + +func (s MigrateSqlServerSqlDbTaskOutputValidationResult) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskOutputValidationResult + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + decoded["resultType"] = "MigrationValidationOutput" + + encoded, err = json.Marshal(decoded) + if 
err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskOutputValidationResult: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskproperties.go new file mode 100644 index 00000000000..159de259e58 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqldbtaskproperties.go @@ -0,0 +1,130 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlDbTaskProperties{} + +type MigrateSqlServerSqlDbTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlDbTaskOutput `json:"output,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlDbTaskProperties{} + +func (s MigrateSqlServerSqlDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlDbTaskProperties + wrapped := wrapper(s) + encoded, err := 
json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.SqlDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlDbTaskProperties{} + +func (s *MigrateSqlServerSqlDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlDbTaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, 
err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlDbTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlDbTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmidatabaseinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmidatabaseinput.go new file mode 100644 index 00000000000..b69a42b500c --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmidatabaseinput.go @@ -0,0 +1,12 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMIDatabaseInput struct { + BackupFilePaths *[]string `json:"backupFilePaths,omitempty"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + Id *string `json:"id,omitempty"` + Name string `json:"name"` + RestoreDatabaseName string `json:"restoreDatabaseName"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..820f8013d06 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskinput.go @@ -0,0 +1,14 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + NumberOfParallelDatabaseMigrations *float64 `json:"numberOfParallelDatabaseMigrations,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..522b6061ed5 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskoutput.go @@ -0,0 +1,92 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSqlServerSqlMISyncTaskOutput interface { + MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = BaseMigrateSqlServerSqlMISyncTaskOutputImpl{} + +type BaseMigrateSqlServerSqlMISyncTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMISyncTaskOutput = RawMigrateSqlServerSqlMISyncTaskOutputImpl{} + +// RawMigrateSqlServerSqlMISyncTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSqlServerSqlMISyncTaskOutputImpl struct { + migrateSqlServerSqlMISyncTaskOutput BaseMigrateSqlServerSqlMISyncTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMISyncTaskOutputImpl) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return s.migrateSqlServerSqlMISyncTaskOutput +} + +func UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(input []byte) (MigrateSqlServerSqlMISyncTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMISyncTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMISyncTaskOutputImpl: %+v", err) + } + + return 
RawMigrateSqlServerSqlMISyncTaskOutputImpl{ + migrateSqlServerSqlMISyncTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go new file mode 100644 index 00000000000..ad81f2f672a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskoutputdatabaselevel.go @@ -0,0 +1,62 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel struct { + ActiveBackupSets *[]BackupSetInfo `json:"activeBackupSets,omitempty"` + ContainerName *string `json:"containerName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ErrorPrefix *string `json:"errorPrefix,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FullBackupSetInfo *BackupSetInfo `json:"fullBackupSetInfo,omitempty"` + IsFullBackupRestored *bool `json:"isFullBackupRestored,omitempty"` + LastRestoredBackupSetInfo *BackupSetInfo `json:"lastRestoredBackupSetInfo,omitempty"` + MigrationState *DatabaseMigrationState `json:"migrationState,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return 
BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskoutputerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskoutputerror.go new file mode 100644 index 00000000000..6994f68ac59 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskoutputerror.go @@ -0,0 +1,52 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputError{} + +type MigrateSqlServerSqlMISyncTaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputError{} + +func (s MigrateSqlServerSqlMISyncTaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go new file mode 100644 index 00000000000..bab9e87373e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskoutputmigrationlevel.go @@ -0,0 +1,62 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMISyncTaskOutput = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMISyncTaskOutputMigrationLevel struct { + DatabaseCount *int64 `json:"databaseCount,omitempty"` + DatabaseErrorCount *int64 `json:"databaseErrorCount,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerName *string `json:"sourceServerName,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerName *string `json:"targetServerName,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMISyncTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MigrateSqlServerSqlMISyncTaskOutput() BaseMigrateSqlServerSqlMISyncTaskOutputImpl { + return BaseMigrateSqlServerSqlMISyncTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlMISyncTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = 
json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..ebd6b2c5fdc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmisynctaskproperties.go @@ -0,0 +1,124 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlMISyncTaskProperties{} + +type MigrateSqlServerSqlMISyncTaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]MigrateSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMISyncTaskProperties{} + +func (s MigrateSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != 
nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMISyncTaskProperties{} + +func (s *MigrateSqlServerSqlMISyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMISyncTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 
'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMISyncTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMISyncTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskinput.go new file mode 100644 index 00000000000..90f4d53e0c6 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskinput.go @@ -0,0 +1,18 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMITaskInput struct { + AadDomainName *string `json:"aadDomainName,omitempty"` + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + EncryptedKeyForSecureFields *string `json:"encryptedKeyForSecureFields,omitempty"` + SelectedAgentJobs *[]string `json:"selectedAgentJobs,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StartedOn *string `json:"startedOn,omitempty"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..14658e663ac --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutput.go @@ -0,0 +1,108 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSqlServerSqlMITaskOutput interface { + MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl +} + +var _ MigrateSqlServerSqlMITaskOutput = BaseMigrateSqlServerSqlMITaskOutputImpl{} + +type BaseMigrateSqlServerSqlMITaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s +} + +var _ MigrateSqlServerSqlMITaskOutput = RawMigrateSqlServerSqlMITaskOutputImpl{} + +// RawMigrateSqlServerSqlMITaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMigrateSqlServerSqlMITaskOutputImpl struct { + migrateSqlServerSqlMITaskOutput BaseMigrateSqlServerSqlMITaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSqlServerSqlMITaskOutputImpl) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return s.migrateSqlServerSqlMITaskOutput +} + +func UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(input []byte) (MigrateSqlServerSqlMITaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "AgentJobLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputAgentJobLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + return out, nil + } 
+ + if strings.EqualFold(value, "DatabaseLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputDatabaseLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "ErrorOutput") { + var out MigrateSqlServerSqlMITaskOutputError + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "LoginLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputLoginLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSqlServerSqlMITaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSqlServerSqlMITaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSqlServerSqlMITaskOutputImpl: %+v", err) + } + + return RawMigrateSqlServerSqlMITaskOutputImpl{ + migrateSqlServerSqlMITaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputagentjoblevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputagentjoblevel.go new file mode 100644 index 00000000000..04f2d5f4733 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputagentjoblevel.go @@ -0,0 +1,58 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +type MigrateSqlServerSqlMITaskOutputAgentJobLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + Message *string `json:"message,omitempty"` + Name *string `json:"name,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputAgentJobLevel{} + +func (s MigrateSqlServerSqlMITaskOutputAgentJobLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputAgentJobLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + decoded["resultType"] = "AgentJobLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputAgentJobLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputdatabaselevel.go 
b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputdatabaselevel.go new file mode 100644 index 00000000000..9830f68704f --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputdatabaselevel.go @@ -0,0 +1,59 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +type MigrateSqlServerSqlMITaskOutputDatabaseLevel struct { + DatabaseName *string `json:"databaseName,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SizeMB *float64 `json:"sizeMB,omitempty"` + Stage *DatabaseMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputDatabaseLevel{} + +func (s MigrateSqlServerSqlMITaskOutputDatabaseLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputDatabaseLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return 
nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + decoded["resultType"] = "DatabaseLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputDatabaseLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputerror.go new file mode 100644 index 00000000000..a853d356e39 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputerror.go @@ -0,0 +1,52 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputError{} + +type MigrateSqlServerSqlMITaskOutputError struct { + Error *ReportableException `json:"error,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputError) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputError{} + +func (s MigrateSqlServerSqlMITaskOutputError) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputError + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, 
fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + decoded["resultType"] = "ErrorOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputError: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputloginlevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputloginlevel.go new file mode 100644 index 00000000000..e3a4f03db53 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputloginlevel.go @@ -0,0 +1,58 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +type MigrateSqlServerSqlMITaskOutputLoginLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + LoginName *string `json:"loginName,omitempty"` + Message *string `json:"message,omitempty"` + Stage *LoginMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputLoginLevel{} + +func (s MigrateSqlServerSqlMITaskOutputLoginLevel) MarshalJSON() ([]byte, error) { + type 
wrapper MigrateSqlServerSqlMITaskOutputLoginLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + decoded["resultType"] = "LoginLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputLoginLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputmigrationlevel.go new file mode 100644 index 00000000000..b6a693639db --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskoutputmigrationlevel.go @@ -0,0 +1,66 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ MigrateSqlServerSqlMITaskOutput = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +type MigrateSqlServerSqlMITaskOutputMigrationLevel struct { + AgentJobs *map[string]string `json:"agentJobs,omitempty"` + Databases *map[string]string `json:"databases,omitempty"` + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Logins *map[string]string `json:"logins,omitempty"` + Message *string `json:"message,omitempty"` + OrphanedUsersInfo *[]OrphanedUserInfo `json:"orphanedUsersInfo,omitempty"` + ServerRoleResults *map[string]StartMigrationScenarioServerRoleResult `json:"serverRoleResults,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSqlServerSqlMITaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MigrateSqlServerSqlMITaskOutput() BaseMigrateSqlServerSqlMITaskOutputImpl { + return BaseMigrateSqlServerSqlMITaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskOutputMigrationLevel{} + +func (s MigrateSqlServerSqlMITaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); 
err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..df270ba17a8 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesqlserversqlmitaskproperties.go @@ -0,0 +1,133 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSqlServerSqlMITaskProperties{} + +type MigrateSqlServerSqlMITaskProperties struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + Output *[]MigrateSqlServerSqlMITaskOutput `json:"output,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: 
s.TaskType, + } +} + +var _ json.Marshaler = MigrateSqlServerSqlMITaskProperties{} + +func (s MigrateSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSqlServerSqlMITaskProperties{} + +func (s *MigrateSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + CreatedOn *string `json:"createdOn,omitempty"` + Input *MigrateSqlServerSqlMITaskInput `json:"input,omitempty"` + IsCloneable *bool `json:"isCloneable,omitempty"` + ParentTaskId *string `json:"parentTaskId,omitempty"` + TaskId *string `json:"taskId,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.CreatedOn = decoded.CreatedOn + s.Input = decoded.Input + s.IsCloneable = decoded.IsCloneable + s.ParentTaskId = decoded.ParentTaskId + s.TaskId = decoded.TaskId + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling 
MigrateSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSqlServerSqlMITaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSqlServerSqlMITaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskinput.go new file mode 100644 index 00000000000..21e69f0da78 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskinput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSsisTaskInput struct { + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + SsisMigrationInfo SsisMigrationInfo `json:"ssisMigrationInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskoutput.go new file mode 100644 index 00000000000..68eab3f91ec --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskoutput.go @@ -0,0 +1,84 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSsisTaskOutput interface { + MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl +} + +var _ MigrateSsisTaskOutput = BaseMigrateSsisTaskOutputImpl{} + +type BaseMigrateSsisTaskOutputImpl struct { + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s BaseMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return s +} + +var _ MigrateSsisTaskOutput = RawMigrateSsisTaskOutputImpl{} + +// RawMigrateSsisTaskOutputImpl is returned when the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). 
+type RawMigrateSsisTaskOutputImpl struct { + migrateSsisTaskOutput BaseMigrateSsisTaskOutputImpl + Type string + Values map[string]interface{} +} + +func (s RawMigrateSsisTaskOutputImpl) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return s.migrateSsisTaskOutput +} + +func UnmarshalMigrateSsisTaskOutputImplementation(input []byte) (MigrateSsisTaskOutput, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutput into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "MigrationLevelOutput") { + var out MigrateSsisTaskOutputMigrationLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "SsisProjectLevelOutput") { + var out MigrateSsisTaskOutputProjectLevel + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskOutputProjectLevel: %+v", err) + } + return out, nil + } + + var parent BaseMigrateSsisTaskOutputImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMigrateSsisTaskOutputImpl: %+v", err) + } + + return RawMigrateSsisTaskOutputImpl{ + migrateSsisTaskOutput: parent, + Type: value, + Values: temp, + }, nil + +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskoutputmigrationlevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskoutputmigrationlevel.go new file mode 100644 index 00000000000..13aa99abe53 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskoutputmigrationlevel.go @@ -0,0 +1,61 @@ +package taskresource + 
+import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputMigrationLevel{} + +type MigrateSsisTaskOutputMigrationLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Message *string `json:"message,omitempty"` + SourceServerBrandVersion *string `json:"sourceServerBrandVersion,omitempty"` + SourceServerVersion *string `json:"sourceServerVersion,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *MigrationStatus `json:"status,omitempty"` + TargetServerBrandVersion *string `json:"targetServerBrandVersion,omitempty"` + TargetServerVersion *string `json:"targetServerVersion,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputMigrationLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputMigrationLevel{} + +func (s MigrateSsisTaskOutputMigrationLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputMigrationLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + decoded["resultType"] = "MigrationLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling 
MigrateSsisTaskOutputMigrationLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskoutputprojectlevel.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskoutputprojectlevel.go new file mode 100644 index 00000000000..57107d205ff --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskoutputprojectlevel.go @@ -0,0 +1,59 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ MigrateSsisTaskOutput = MigrateSsisTaskOutputProjectLevel{} + +type MigrateSsisTaskOutputProjectLevel struct { + EndedOn *string `json:"endedOn,omitempty"` + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + FolderName *string `json:"folderName,omitempty"` + Message *string `json:"message,omitempty"` + ProjectName *string `json:"projectName,omitempty"` + Stage *SsisMigrationStage `json:"stage,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + State *MigrationState `json:"state,omitempty"` + + // Fields inherited from MigrateSsisTaskOutput + + Id *string `json:"id,omitempty"` + ResultType string `json:"resultType"` +} + +func (s MigrateSsisTaskOutputProjectLevel) MigrateSsisTaskOutput() BaseMigrateSsisTaskOutputImpl { + return BaseMigrateSsisTaskOutputImpl{ + Id: s.Id, + ResultType: s.ResultType, + } +} + +var _ json.Marshaler = MigrateSsisTaskOutputProjectLevel{} + +func (s MigrateSsisTaskOutputProjectLevel) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskOutputProjectLevel + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + var decoded map[string]interface{} + if err = 
json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + decoded["resultType"] = "SsisProjectLevelOutput" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskOutputProjectLevel: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskproperties.go new file mode 100644 index 00000000000..b2eab2b8f3d --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratessistaskproperties.go @@ -0,0 +1,121 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = MigrateSsisTaskProperties{} + +type MigrateSsisTaskProperties struct { + Input *MigrateSsisTaskInput `json:"input,omitempty"` + Output *[]MigrateSsisTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s MigrateSsisTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = MigrateSsisTaskProperties{} + +func (s MigrateSsisTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSsisTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + 
return nil, fmt.Errorf("marshaling MigrateSsisTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSsisTaskProperties: %+v", err) + } + + decoded["taskType"] = "Migrate.Ssis" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSsisTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MigrateSsisTaskProperties{} + +func (s *MigrateSsisTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MigrateSsisTaskInput `json:"input,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MigrateSsisTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + if v, ok := temp["output"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil 
{ + return fmt.Errorf("unmarshaling Output into list []json.RawMessage: %+v", err) + } + + output := make([]MigrateSsisTaskOutput, 0) + for i, val := range listTemp { + impl, err := UnmarshalMigrateSsisTaskOutputImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Output' for 'MigrateSsisTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Output = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesynccompletecommandinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesynccompletecommandinput.go new file mode 100644 index 00000000000..281d8564318 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesynccompletecommandinput.go @@ -0,0 +1,27 @@ +package taskresource + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MigrateSyncCompleteCommandInput struct { + CommitTimeStamp *string `json:"commitTimeStamp,omitempty"` + DatabaseName string `json:"databaseName"` +} + +func (o *MigrateSyncCompleteCommandInput) GetCommitTimeStampAsTime() (*time.Time, error) { + if o.CommitTimeStamp == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CommitTimeStamp, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrateSyncCompleteCommandInput) SetCommitTimeStampAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CommitTimeStamp = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesynccompletecommandoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesynccompletecommandoutput.go new file mode 100644 index 00000000000..a1a9245d90e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesynccompletecommandoutput.go @@ -0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MigrateSyncCompleteCommandOutput struct { + Errors *[]ReportableException `json:"errors,omitempty"` + Id *string `json:"id,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migratesynccompletecommandproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesynccompletecommandproperties.go new file mode 100644 index 00000000000..a999fd61e60 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migratesynccompletecommandproperties.go @@ -0,0 +1,56 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ CommandProperties = MigrateSyncCompleteCommandProperties{} + +type MigrateSyncCompleteCommandProperties struct { + CommandId *string `json:"commandId,omitempty"` + Input *MigrateSyncCompleteCommandInput `json:"input,omitempty"` + Output *MigrateSyncCompleteCommandOutput `json:"output,omitempty"` + + // Fields inherited from CommandProperties + + CommandType CommandType `json:"commandType"` + Errors *[]ODataError `json:"errors,omitempty"` + State *CommandState `json:"state,omitempty"` +} + +func (s MigrateSyncCompleteCommandProperties) CommandProperties() BaseCommandPropertiesImpl { + return BaseCommandPropertiesImpl{ + CommandType: s.CommandType, + Errors: s.Errors, + State: s.State, + } +} + +var _ json.Marshaler = MigrateSyncCompleteCommandProperties{} + +func (s MigrateSyncCompleteCommandProperties) MarshalJSON() ([]byte, error) { + type wrapper MigrateSyncCompleteCommandProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + decoded["commandType"] = "Migrate.Sync.Complete.Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MigrateSyncCompleteCommandProperties: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migrationeligibilityinfo.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migrationeligibilityinfo.go new file mode 100644 index 00000000000..2c95a8d88fa --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migrationeligibilityinfo.go @@ -0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. 
// MigrationEligibilityInfo reports whether a database can be migrated and, if
// not, the validation messages explaining why. (Presumably populated by a
// service-side assessment — field semantics inferred from names; confirm
// against the DataMigration API spec.)
type MigrationEligibilityInfo struct {
	// IsEligibleForMigration indicates whether migration is possible.
	IsEligibleForMigration *bool `json:"isEligibleForMigration,omitempty"`
	// ValidationMessages lists human-readable validation findings.
	ValidationMessages *[]string `json:"validationMessages,omitempty"`
}

// MigrationReportResult identifies a migration report and where to fetch it.
type MigrationReportResult struct {
	// Id is the identifier of the report.
	Id *string `json:"id,omitempty"`
	// ReportURL is the location the report can be downloaded from.
	ReportURL *string `json:"reportUrl,omitempty"`
}
+ +type MigrationValidationDatabaseSummaryResult struct { + EndedOn *string `json:"endedOn,omitempty"` + Id *string `json:"id,omitempty"` + MigrationId *string `json:"migrationId,omitempty"` + SourceDatabaseName *string `json:"sourceDatabaseName,omitempty"` + StartedOn *string `json:"startedOn,omitempty"` + Status *ValidationStatus `json:"status,omitempty"` + TargetDatabaseName *string `json:"targetDatabaseName,omitempty"` +} + +func (o *MigrationValidationDatabaseSummaryResult) GetEndedOnAsTime() (*time.Time, error) { + if o.EndedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.EndedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetEndedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.EndedOn = &formatted +} + +func (o *MigrationValidationDatabaseSummaryResult) GetStartedOnAsTime() (*time.Time, error) { + if o.StartedOn == nil { + return nil, nil + } + return dates.ParseAsFormat(o.StartedOn, "2006-01-02T15:04:05Z07:00") +} + +func (o *MigrationValidationDatabaseSummaryResult) SetStartedOnAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.StartedOn = &formatted +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_migrationvalidationoptions.go b/resource-manager/datamigration/2025-06-30/taskresource/model_migrationvalidationoptions.go new file mode 100644 index 00000000000..d463dfa9216 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_migrationvalidationoptions.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// MigrationValidationOptions toggles the individual validation passes that can
// run as part of a migration.
type MigrationValidationOptions struct {
	// EnableDataIntegrityValidation enables the data-integrity pass.
	EnableDataIntegrityValidation *bool `json:"enableDataIntegrityValidation,omitempty"`
	// EnableQueryAnalysisValidation enables the query-analysis pass.
	EnableQueryAnalysisValidation *bool `json:"enableQueryAnalysisValidation,omitempty"`
	// EnableSchemaValidation enables the schema-comparison pass.
	EnableSchemaValidation *bool `json:"enableSchemaValidation,omitempty"`
}

// MigrationValidationResult is the overall validation outcome for a migration,
// with per-database summaries keyed by database (key semantics not visible
// here — presumably the database name; confirm against the API spec).
type MigrationValidationResult struct {
	Id             *string                                              `json:"id,omitempty"`
	MigrationId    *string                                              `json:"migrationId,omitempty"`
	Status         *ValidationStatus                                    `json:"status,omitempty"`
	SummaryResults *map[string]MigrationValidationDatabaseSummaryResult `json:"summaryResults,omitempty"`
}
+ +var _ ConnectionInfo = MiSqlConnectionInfo{} + +type MiSqlConnectionInfo struct { + ManagedInstanceResourceId string `json:"managedInstanceResourceId"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MiSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MiSqlConnectionInfo{} + +func (s MiSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MiSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MiSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MiSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MiSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MiSqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbclusterinfo.go b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbclusterinfo.go new file mode 100644 index 00000000000..549cec8c69a --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbclusterinfo.go @@ -0,0 +1,11 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// MongoDbClusterInfo describes a source/target MongoDB cluster: its databases,
// sharding support, cluster type and version.
type MongoDbClusterInfo struct {
	Databases        []MongoDbDatabaseInfo `json:"databases"`
	SupportsSharding bool                  `json:"supportsSharding"`
	Type             MongoDbClusterType    `json:"type"`
	Version          string                `json:"version"`
}

// MongoDbCollectionInfo describes a single MongoDB collection, including size
// statistics, sharding info, and whether it is a view or a system collection.
type MongoDbCollectionInfo struct {
	// AverageDocumentSize / DataSize are in bytes — inferred from names;
	// confirm units against the DataMigration API spec.
	AverageDocumentSize int64                `json:"averageDocumentSize"`
	DataSize            int64                `json:"dataSize"`
	DatabaseName        string               `json:"databaseName"`
	DocumentCount       int64                `json:"documentCount"`
	IsCapped            bool                 `json:"isCapped"`
	IsSystemCollection  bool                 `json:"isSystemCollection"`
	IsView              bool                 `json:"isView"`
	Name                string               `json:"name"`
	QualifiedName       string               `json:"qualifiedName"`
	ShardKey            *MongoDbShardKeyInfo `json:"shardKey,omitempty"`
	SupportsSharding    bool                 `json:"supportsSharding"`
	// ViewOf names the collection this view is defined over (set only when IsView).
	ViewOf *string `json:"viewOf,omitempty"`
}
+ +var _ MongoDbProgress = MongoDbCollectionProgress{} + +type MongoDbCollectionProgress struct { + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbCollectionProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbCollectionProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbCollectionProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbCollectionProgress) 
SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbCollectionProgress{} + +func (s MongoDbCollectionProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbCollectionProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbCollectionProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbCollectionProgress: %+v", err) + } + + decoded["resultType"] = "Collection" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbCollectionProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbcollectionsettings.go b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbcollectionsettings.go new file mode 100644 index 00000000000..15a1e01270e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbcollectionsettings.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbCollectionSettings struct { + CanDelete *bool `json:"canDelete,omitempty"` + ShardKey *MongoDbShardKeySetting `json:"shardKey,omitempty"` + TargetRUs *int64 `json:"targetRUs,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbconnectioninfo.go b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbconnectioninfo.go new file mode 100644 index 00000000000..c54f00935ed --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbconnectioninfo.go @@ -0,0 +1,64 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ConnectionInfo = MongoDbConnectionInfo{} + +type MongoDbConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + ConnectionString string `json:"connectionString"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + EnforceSSL *bool `json:"enforceSSL,omitempty"` + Port *int64 `json:"port,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MongoDbConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MongoDbConnectionInfo{} + +func (s MongoDbConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper 
MongoDbConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbConnectionInfo: %+v", err) + } + + decoded["type"] = "mongoDbConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbdatabaseinfo.go b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbdatabaseinfo.go new file mode 100644 index 00000000000..b85b19bd486 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbdatabaseinfo.go @@ -0,0 +1,14 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// MongoDbDatabaseInfo describes a single MongoDB database: its collections
// plus aggregate size statistics and sharding support.
type MongoDbDatabaseInfo struct {
	// AverageDocumentSize / DataSize are in bytes — inferred from names;
	// confirm units against the DataMigration API spec.
	AverageDocumentSize int64                   `json:"averageDocumentSize"`
	Collections         []MongoDbCollectionInfo `json:"collections"`
	DataSize            int64                   `json:"dataSize"`
	DocumentCount       int64                   `json:"documentCount"`
	Name                string                  `json:"name"`
	QualifiedName       string                  `json:"qualifiedName"`
	SupportsSharding    bool                    `json:"supportsSharding"`
}
+ +var _ MongoDbProgress = MongoDbDatabaseProgress{} + +type MongoDbDatabaseProgress struct { + Collections *map[string]MongoDbProgress `json:"collections,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbDatabaseProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbDatabaseProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbDatabaseProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbDatabaseProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbDatabaseProgress{} + +func (s MongoDbDatabaseProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbDatabaseProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbDatabaseProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbDatabaseProgress: %+v", err) + } + + decoded["resultType"] = "Database" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbDatabaseProgress: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &MongoDbDatabaseProgress{} + +func (s *MongoDbDatabaseProgress) UnmarshalJSON(bytes []byte) error { + var decoded struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.BytesCopied = decoded.BytesCopied + s.DocumentsCopied = decoded.DocumentsCopied + s.ElapsedTime = decoded.ElapsedTime + s.Errors = decoded.Errors + s.EventsPending = 
decoded.EventsPending + s.EventsReplayed = decoded.EventsReplayed + s.LastEventTime = decoded.LastEventTime + s.LastReplayTime = decoded.LastReplayTime + s.Name = decoded.Name + s.QualifiedName = decoded.QualifiedName + s.ResultType = decoded.ResultType + s.State = decoded.State + s.TotalBytes = decoded.TotalBytes + s.TotalDocuments = decoded.TotalDocuments + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling MongoDbDatabaseProgress into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["collections"]; ok { + var dictionaryTemp map[string]json.RawMessage + if err := json.Unmarshal(v, &dictionaryTemp); err != nil { + return fmt.Errorf("unmarshaling Collections into dictionary map[string]json.RawMessage: %+v", err) + } + + output := make(map[string]MongoDbProgress) + for key, val := range dictionaryTemp { + impl, err := UnmarshalMongoDbProgressImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling key %q field 'Collections' for 'MongoDbDatabaseProgress': %+v", key, err) + } + output[key] = impl + } + s.Collections = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbdatabasesettings.go b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbdatabasesettings.go new file mode 100644 index 00000000000..e0e5bdea319 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbdatabasesettings.go @@ -0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// MongoDbDatabaseSettings configures the migration of one database: its
// collection settings (keyed by collection — presumably the collection name;
// confirm against the API spec) and an optional shared throughput.
type MongoDbDatabaseSettings struct {
	Collections map[string]MongoDbCollectionSettings `json:"collections"`
	// TargetRUs is the shared request-unit throughput for the target database.
	TargetRUs *int64 `json:"targetRUs,omitempty"`
}

// MongoDbError describes an error or warning raised during a MongoDB
// migration, with an occurrence count.
type MongoDbError struct {
	Code    *string           `json:"code,omitempty"`
	Count   *int64            `json:"count,omitempty"`
	Message *string           `json:"message,omitempty"`
	Type    *MongoDbErrorType `json:"type,omitempty"`
}
+ +var _ MongoDbProgress = MongoDbMigrationProgress{} + +type MongoDbMigrationProgress struct { + Databases *map[string]MongoDbDatabaseProgress `json:"databases,omitempty"` + + // Fields inherited from MongoDbProgress + + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s MongoDbMigrationProgress) MongoDbProgress() BaseMongoDbProgressImpl { + return BaseMongoDbProgressImpl{ + BytesCopied: s.BytesCopied, + DocumentsCopied: s.DocumentsCopied, + ElapsedTime: s.ElapsedTime, + Errors: s.Errors, + EventsPending: s.EventsPending, + EventsReplayed: s.EventsReplayed, + LastEventTime: s.LastEventTime, + LastReplayTime: s.LastReplayTime, + Name: s.Name, + QualifiedName: s.QualifiedName, + ResultType: s.ResultType, + State: s.State, + TotalBytes: s.TotalBytes, + TotalDocuments: s.TotalDocuments, + } +} + +func (o *MongoDbMigrationProgress) GetLastEventTimeAsTime() (*time.Time, error) { + if o.LastEventTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastEventTime, "2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastEventTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastEventTime = &formatted +} + +func (o *MongoDbMigrationProgress) GetLastReplayTimeAsTime() (*time.Time, error) { + if o.LastReplayTime == nil { + return nil, nil + } + return dates.ParseAsFormat(o.LastReplayTime, 
"2006-01-02T15:04:05Z07:00") +} + +func (o *MongoDbMigrationProgress) SetLastReplayTimeAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.LastReplayTime = &formatted +} + +var _ json.Marshaler = MongoDbMigrationProgress{} + +func (s MongoDbMigrationProgress) MarshalJSON() ([]byte, error) { + type wrapper MongoDbMigrationProgress + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MongoDbMigrationProgress: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbMigrationProgress: %+v", err) + } + + decoded["resultType"] = "Migration" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MongoDbMigrationProgress: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbmigrationsettings.go b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbmigrationsettings.go new file mode 100644 index 00000000000..80b9d880800 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbmigrationsettings.go @@ -0,0 +1,13 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MongoDbMigrationSettings struct { + BoostRUs *int64 `json:"boostRUs,omitempty"` + Databases map[string]MongoDbDatabaseSettings `json:"databases"` + Replication *MongoDbReplication `json:"replication,omitempty"` + Source MongoDbConnectionInfo `json:"source"` + Target MongoDbConnectionInfo `json:"target"` + Throttling *MongoDbThrottlingSettings `json:"throttling,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbprogress.go b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbprogress.go new file mode 100644 index 00000000000..a08fa755b2e --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_mongodbprogress.go @@ -0,0 +1,104 @@ +package taskresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MongoDbProgress interface { + MongoDbProgress() BaseMongoDbProgressImpl +} + +var _ MongoDbProgress = BaseMongoDbProgressImpl{} + +type BaseMongoDbProgressImpl struct { + BytesCopied int64 `json:"bytesCopied"` + DocumentsCopied int64 `json:"documentsCopied"` + ElapsedTime string `json:"elapsedTime"` + Errors map[string]MongoDbError `json:"errors"` + EventsPending int64 `json:"eventsPending"` + EventsReplayed int64 `json:"eventsReplayed"` + LastEventTime *string `json:"lastEventTime,omitempty"` + LastReplayTime *string `json:"lastReplayTime,omitempty"` + Name *string `json:"name,omitempty"` + QualifiedName *string `json:"qualifiedName,omitempty"` + ResultType ResultType `json:"resultType"` + State MongoDbMigrationState `json:"state"` + TotalBytes int64 `json:"totalBytes"` + TotalDocuments int64 `json:"totalDocuments"` +} + +func (s BaseMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s +} + +var _ MongoDbProgress = RawMongoDbProgressImpl{} + +// RawMongoDbProgressImpl is returned when 
the Discriminated Value doesn't match any of the defined types +// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround) +// and is used only for Deserialization (e.g. this cannot be used as a Request Payload). +type RawMongoDbProgressImpl struct { + mongoDbProgress BaseMongoDbProgressImpl + Type string + Values map[string]interface{} +} + +func (s RawMongoDbProgressImpl) MongoDbProgress() BaseMongoDbProgressImpl { + return s.mongoDbProgress +} + +func UnmarshalMongoDbProgressImplementation(input []byte) (MongoDbProgress, error) { + if input == nil { + return nil, nil + } + + var temp map[string]interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return nil, fmt.Errorf("unmarshaling MongoDbProgress into map[string]interface: %+v", err) + } + + var value string + if v, ok := temp["resultType"]; ok { + value = fmt.Sprintf("%v", v) + } + + if strings.EqualFold(value, "Collection") { + var out MongoDbCollectionProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbCollectionProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Database") { + var out MongoDbDatabaseProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbDatabaseProgress: %+v", err) + } + return out, nil + } + + if strings.EqualFold(value, "Migration") { + var out MongoDbMigrationProgress + if err := json.Unmarshal(input, &out); err != nil { + return nil, fmt.Errorf("unmarshaling into MongoDbMigrationProgress: %+v", err) + } + return out, nil + } + + var parent BaseMongoDbProgressImpl + if err := json.Unmarshal(input, &parent); err != nil { + return nil, fmt.Errorf("unmarshaling into BaseMongoDbProgressImpl: %+v", err) + } + + return RawMongoDbProgressImpl{ + mongoDbProgress: parent, + Type: value, + Values: temp, + }, nil + +} diff --git 
package taskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// MongoDbShardKeyField is one component of a MongoDB shard key: the field
// name and its ordering (as defined by MongoDbShardKeyOrder).
type MongoDbShardKeyField struct {
	Name  string                `json:"name"`
	Order MongoDbShardKeyOrder  `json:"order"`
}

package taskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// MongoDbShardKeyInfo describes an existing shard key: its fields and
// whether the key is unique. Both fields are required on the wire.
type MongoDbShardKeyInfo struct {
	Fields   []MongoDbShardKeyField `json:"fields"`
	IsUnique bool                   `json:"isUnique"`
}

package taskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// MongoDbShardKeySetting is the requested shard key for a target collection.
// Unlike MongoDbShardKeyInfo, IsUnique is optional here (a setting, not a fact).
type MongoDbShardKeySetting struct {
	Fields   []MongoDbShardKeyField `json:"fields"`
	IsUnique *bool                  `json:"isUnique,omitempty"`
}

package taskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// MongoDbThrottlingSettings limits the resources a MongoDB migration may
// consume on the machine running it. All knobs are optional.
type MongoDbThrottlingSettings struct {
	// MaxParallelism caps concurrent migration work items.
	MaxParallelism *int64 `json:"maxParallelism,omitempty"`
	// MinFreeCPU: note the JSON key is "minFreeCpu" (lowercase 'pu').
	MinFreeCPU *int64 `json:"minFreeCpu,omitempty"`
	MinFreeMemoryMb *int64 `json:"minFreeMemoryMb,omitempty"`
}

package taskresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +var _ ConnectionInfo = MySqlConnectionInfo{} + +type MySqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Port int64 `json:"port"` + ServerName string `json:"serverName"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s MySqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = MySqlConnectionInfo{} + +func (s MySqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper MySqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling MySqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling MySqlConnectionInfo: %+v", err) + } + + decoded["type"] = "MySqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling MySqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_odataerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_odataerror.go new file mode 100644 index 00000000000..56fbb2e03cb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_odataerror.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// ODataError is a recursive OData-style error: a code/message pair plus
// optional nested detail errors.
type ODataError struct {
	Code *string `json:"code,omitempty"`
	Details *[]ODataError `json:"details,omitempty"`
	Message *string `json:"message,omitempty"`
}

package taskresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

var _ ConnectionInfo = OracleConnectionInfo{}

// OracleConnectionInfo is the ConnectionInfo variant for an Oracle server
// (discriminator value "OracleConnectionInfo", stamped in MarshalJSON).
type OracleConnectionInfo struct {
	Authentication *AuthenticationType `json:"authentication,omitempty"`
	// DataSource is required; server name/port are optional here, unlike
	// the SQL/MySQL variants.
	DataSource string `json:"dataSource"`
	Port *int64 `json:"port,omitempty"`
	ServerName *string `json:"serverName,omitempty"`
	ServerVersion *string `json:"serverVersion,omitempty"`

	// Fields inherited from ConnectionInfo

	Password *string `json:"password,omitempty"`
	Type string `json:"type"`
	UserName *string `json:"userName,omitempty"`
}

// ConnectionInfo returns the shared base fields, satisfying the interface.
func (s OracleConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl {
	return BaseConnectionInfoImpl{
		Password: s.Password,
		Type:     s.Type,
		UserName: s.UserName,
	}
}

var _ json.Marshaler = OracleConnectionInfo{}

// MarshalJSON serializes via a method-less wrapper type (avoids recursion)
// and then forces the "type" discriminator expected by the service.
func (s OracleConnectionInfo) MarshalJSON() ([]byte, error) {
	type wrapper OracleConnectionInfo
	wrapped := wrapper(s)
	encoded, err := json.Marshal(wrapped)
	if err != nil {
		return nil, fmt.Errorf("marshaling OracleConnectionInfo: %+v", err)
	}

	var decoded map[string]interface{}
	if err = json.Unmarshal(encoded, &decoded); err != nil {
		return nil, fmt.Errorf("unmarshaling OracleConnectionInfo: %+v", err)
	}

	decoded["type"] = "OracleConnectionInfo"

	encoded, err = json.Marshal(decoded)
	if err != nil {
		return nil, fmt.Errorf("re-marshaling OracleConnectionInfo: %+v", err)
	}

	return encoded, nil
}

package taskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// OrphanedUserInfo identifies a database user with no matching login.
type OrphanedUserInfo struct {
	DatabaseName *string `json:"databaseName,omitempty"`
	Name *string `json:"name,omitempty"`
}

package taskresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +var _ ConnectionInfo = PostgreSqlConnectionInfo{} + +type PostgreSqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource *string `json:"dataSource,omitempty"` + DatabaseName *string `json:"databaseName,omitempty"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Port int64 `json:"port"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName string `json:"serverName"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s PostgreSqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = PostgreSqlConnectionInfo{} + +func (s PostgreSqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper PostgreSqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling PostgreSqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling PostgreSqlConnectionInfo: %+v", err) + } + + decoded["type"] = "PostgreSqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling PostgreSqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_projecttask.go b/resource-manager/datamigration/2025-06-30/taskresource/model_projecttask.go new file mode 100644 index 00000000000..b0fb727b15e --- /dev/null +++ 
package taskresource

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ProjectTask is an ARM task resource whose Properties field is polymorphic
// (a ProjectTaskProperties union), which is why it needs a custom UnmarshalJSON.
type ProjectTask struct {
	Etag *string `json:"etag,omitempty"`
	Id *string `json:"id,omitempty"`
	Name *string `json:"name,omitempty"`
	// Properties holds the discriminated task payload; see
	// UnmarshalProjectTaskPropertiesImplementation for how it is decoded.
	Properties ProjectTaskProperties `json:"properties"`
	SystemData *systemdata.SystemData `json:"systemData,omitempty"`
	Type *string `json:"type,omitempty"`
}

var _ json.Unmarshaler = &ProjectTask{}

// UnmarshalJSON decodes the plain fields directly, then hands the raw
// "properties" object to the union unmarshaler to pick the concrete type.
func (s *ProjectTask) UnmarshalJSON(bytes []byte) error {
	// First pass: everything except the polymorphic Properties field.
	var decoded struct {
		Etag *string `json:"etag,omitempty"`
		Id *string `json:"id,omitempty"`
		Name *string `json:"name,omitempty"`
		SystemData *systemdata.SystemData `json:"systemData,omitempty"`
		Type *string `json:"type,omitempty"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.Etag = decoded.Etag
	s.Id = decoded.Id
	s.Name = decoded.Name
	s.SystemData = decoded.SystemData
	s.Type = decoded.Type

	// Second pass: keep "properties" raw so the discriminator can be read.
	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling ProjectTask into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["properties"]; ok {
		impl, err := UnmarshalProjectTaskPropertiesImplementation(v)
		if err != nil {
			return fmt.Errorf("unmarshaling field 'Properties' for 'ProjectTask': %+v", err)
		}
		s.Properties = impl
	}

	return nil
}
package taskresource

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ProjectTaskProperties is a discriminated union (discriminator: "taskType");
// each task kind in the datamigration API implements it.
type ProjectTaskProperties interface {
	ProjectTaskProperties() BaseProjectTaskPropertiesImpl
}

var _ ProjectTaskProperties = BaseProjectTaskPropertiesImpl{}

// BaseProjectTaskPropertiesImpl carries the fields shared by all task kinds.
type BaseProjectTaskPropertiesImpl struct {
	ClientData *map[string]string `json:"clientData,omitempty"`
	// Commands is itself a list of a discriminated union (CommandProperties),
	// hence the custom UnmarshalJSON below.
	Commands *[]CommandProperties `json:"commands,omitempty"`
	Errors *[]ODataError `json:"errors,omitempty"`
	State *TaskState `json:"state,omitempty"`
	// TaskType is the union discriminator.
	TaskType TaskType `json:"taskType"`
}

// ProjectTaskProperties returns the shared base fields, satisfying the interface.
func (s BaseProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return s
}

var _ ProjectTaskProperties = RawProjectTaskPropertiesImpl{}

// RawProjectTaskPropertiesImpl is returned when the Discriminated Value doesn't match any of the defined types
// NOTE: this should only be used when a type isn't defined for this type of Object (as a workaround)
// and is used only for Deserialization (e.g. this cannot be used as a Request Payload).
type RawProjectTaskPropertiesImpl struct {
	projectTaskProperties BaseProjectTaskPropertiesImpl
	Type                  string
	Values                map[string]interface{}
}

// ProjectTaskProperties exposes the parsed base fields of the unknown payload.
func (s RawProjectTaskPropertiesImpl) ProjectTaskProperties() BaseProjectTaskPropertiesImpl {
	return s.projectTaskProperties
}

var _ json.Unmarshaler = &BaseProjectTaskPropertiesImpl{}

// UnmarshalJSON decodes the plain fields directly, then decodes each element
// of the polymorphic "commands" list through the CommandProperties union.
func (s *BaseProjectTaskPropertiesImpl) UnmarshalJSON(bytes []byte) error {
	var decoded struct {
		ClientData *map[string]string `json:"clientData,omitempty"`
		Errors *[]ODataError `json:"errors,omitempty"`
		State *TaskState `json:"state,omitempty"`
		TaskType TaskType `json:"taskType"`
	}
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}

	s.ClientData = decoded.ClientData
	s.Errors = decoded.Errors
	s.State = decoded.State
	s.TaskType = decoded.TaskType

	var temp map[string]json.RawMessage
	if err := json.Unmarshal(bytes, &temp); err != nil {
		return fmt.Errorf("unmarshaling BaseProjectTaskPropertiesImpl into map[string]json.RawMessage: %+v", err)
	}

	if v, ok := temp["commands"]; ok {
		var listTemp []json.RawMessage
		if err := json.Unmarshal(v, &listTemp); err != nil {
			return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err)
		}

		output := make([]CommandProperties, 0)
		for i, val := range listTemp {
			impl, err := UnmarshalCommandPropertiesImplementation(val)
			if err != nil {
				return fmt.Errorf("unmarshaling index %d field 'Commands' for 'BaseProjectTaskPropertiesImpl': %+v", i, err)
			}
			output = append(output, impl)
		}
		s.Commands = &output
	}

	return nil
}

// UnmarshalProjectTaskPropertiesImplementation decodes a ProjectTaskProperties
// payload into its concrete type by matching the "taskType" discriminator
// (case-insensitively). Unknown discriminators fall back to
// RawProjectTaskPropertiesImpl instead of failing, so new service-side task
// kinds don't break deserialization.
func UnmarshalProjectTaskPropertiesImplementation(input []byte) (ProjectTaskProperties, error) {
	if input == nil {
		return nil, nil
	}

	var temp map[string]interface{}
	if err := json.Unmarshal(input, &temp); err != nil {
		return nil, fmt.Errorf("unmarshaling ProjectTaskProperties into map[string]interface: %+v", err)
	}

	// Pull out the discriminator; missing value leaves it "" and hits the fallback.
	var value string
	if v, ok := temp["taskType"]; ok {
		value = fmt.Sprintf("%v", v)
	}

	if strings.EqualFold(value, "Connect.MongoDb") {
		var out ConnectToMongoDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToMongoDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.MySql") {
		var out ConnectToSourceMySqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceMySqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.Oracle.Sync") {
		var out ConnectToSourceOracleSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceOracleSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.PostgreSql.Sync") {
		var out ConnectToSourcePostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourcePostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.SqlServer.Sync") {
		var out ConnectToSourceSqlServerSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToSource.SqlServer") {
		var out ConnectToSourceSqlServerTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToSourceSqlServerTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureDbForMySql") {
		var out ConnectToTargetAzureDbForMySqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForMySqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureDbForPostgreSql.Sync") {
		var out ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.Oracle.AzureDbForPostgreSql.Sync") {
		var out ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.SqlDb") {
		var out ConnectToTargetSqlDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI.Sync.LRS") {
		var out ConnectToTargetSqlMISyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMISyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.AzureSqlDbMI") {
		var out ConnectToTargetSqlMITaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlMITaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ConnectToTarget.SqlDb.Sync") {
		var out ConnectToTargetSqlSqlDbSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ConnectToTargetSqlSqlDbSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetTDECertificates.Sql") {
		var out GetTdeCertificatesSqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetTdeCertificatesSqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTablesMySql") {
		var out GetUserTablesMySqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesMySqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTablesOracle") {
		var out GetUserTablesOracleTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesOracleTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTablesPostgreSql") {
		var out GetUserTablesPostgreSqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesPostgreSqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTables.AzureSqlDb.Sync") {
		var out GetUserTablesSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "GetUserTables.Sql") {
		var out GetUserTablesSqlTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into GetUserTablesSqlTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.MongoDb") {
		var out MigrateMongoDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMongoDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql") {
		var out MigrateMySqlAzureDbForMySqlOfflineTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlOfflineTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.MySql.AzureDbForMySql.Sync") {
		var out MigrateMySqlAzureDbForMySqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateMySqlAzureDbForMySqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.Oracle.AzureDbForPostgreSql.Sync") {
		var out MigrateOracleAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.PostgreSql.AzureDbForPostgreSql.SyncV2") {
		var out MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigratePostgreSqlAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDb.Sync") {
		var out MigrateSqlServerSqlDbSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.SqlDb") {
		var out MigrateSqlServerSqlDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI.Sync.LRS") {
		var out MigrateSqlServerSqlMISyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMISyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.SqlServer.AzureSqlDbMI") {
		var out MigrateSqlServerSqlMITaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSqlServerSqlMITaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Migrate.Ssis") {
		var out MigrateSsisTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into MigrateSsisTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.SqlDb.Sync") {
		var out ValidateMigrationInputSqlServerSqlDbSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS") {
		var out ValidateMigrationInputSqlServerSqlMISyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "ValidateMigrationInput.SqlServer.AzureSqlDbMI") {
		var out ValidateMigrationInputSqlServerSqlMITaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Validate.MongoDb") {
		var out ValidateMongoDbTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateMongoDbTaskProperties: %+v", err)
		}
		return out, nil
	}

	if strings.EqualFold(value, "Validate.Oracle.AzureDbPostgreSql.Sync") {
		var out ValidateOracleAzureDbForPostgreSqlSyncTaskProperties
		if err := json.Unmarshal(input, &out); err != nil {
			return nil, fmt.Errorf("unmarshaling into ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err)
		}
		return out, nil
	}

	// Fallback: keep the base fields plus the raw map for callers to inspect.
	var parent BaseProjectTaskPropertiesImpl
	if err := json.Unmarshal(input, &parent); err != nil {
		return nil, fmt.Errorf("unmarshaling into BaseProjectTaskPropertiesImpl: %+v", err)
	}

	return RawProjectTaskPropertiesImpl{
		projectTaskProperties: parent,
		Type:                  value,
		Values:                temp,
	}, nil

}

package taskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// QueryAnalysisValidationResult pairs a query's execution results with any
// validation errors found for it.
type QueryAnalysisValidationResult struct {
	QueryResults *QueryExecutionResult `json:"queryResults,omitempty"`
	ValidationErrors *ValidationError `json:"validationErrors,omitempty"`
}

package taskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// QueryExecutionResult compares how one query executed against the source
// and target databases.
type QueryExecutionResult struct {
	QueryText *string `json:"queryText,omitempty"`
	SourceResult *ExecutionStatistics `json:"sourceResult,omitempty"`
	StatementsInBatch *int64 `json:"statementsInBatch,omitempty"`
	TargetResult *ExecutionStatistics `json:"targetResult,omitempty"`
}

package taskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ReportableException is an exception surfaced by the service, including
// where it occurred and (optionally) remediation guidance.
type ReportableException struct {
	// ActionableMessage suggests what the user can do about the error.
	ActionableMessage *string `json:"actionableMessage,omitempty"`
	FilePath *string `json:"filePath,omitempty"`
	HResult *int64 `json:"hResult,omitempty"`
	// LineNumber is a string on the wire, not an integer.
	LineNumber *string `json:"lineNumber,omitempty"`
	Message *string `json:"message,omitempty"`
	StackTrace *string `json:"stackTrace,omitempty"`
}

package taskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// SchemaComparisonValidationResult reports schema differences between source
// and target, plus per-object-type counts on each side.
type SchemaComparisonValidationResult struct {
	SchemaDifferences *SchemaComparisonValidationResultType `json:"schemaDifferences,omitempty"`
	// Counts are keyed by object-type name; values are object counts.
	SourceDatabaseObjectCount *map[string]int64 `json:"sourceDatabaseObjectCount,omitempty"`
	TargetDatabaseObjectCount *map[string]int64 `json:"targetDatabaseObjectCount,omitempty"`
	ValidationErrors *ValidationError `json:"validationErrors,omitempty"`
}

package taskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// SchemaComparisonValidationResultType identifies one schema difference:
// the object affected and the action needed to reconcile it.
type SchemaComparisonValidationResultType struct {
	ObjectName *string `json:"objectName,omitempty"`
	ObjectType *ObjectType `json:"objectType,omitempty"`
	UpdateAction *UpdateActionType `json:"updateAction,omitempty"`
}

package taskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// SelectedCertificateInput names a certificate plus the password protecting
// it; both fields are required on the wire.
type SelectedCertificateInput struct {
	CertificateName string `json:"certificateName"`
	Password string `json:"password"`
}

package taskresource

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ServerProperties describes a discovered server: platform, edition,
// versions, and how many databases it hosts. All fields are optional.
type ServerProperties struct {
	ServerDatabaseCount *int64 `json:"serverDatabaseCount,omitempty"`
	ServerEdition *string `json:"serverEdition,omitempty"`
	ServerName *string `json:"serverName,omitempty"`
	ServerOperatingSystemVersion *string `json:"serverOperatingSystemVersion,omitempty"`
	ServerPlatform *string `json:"serverPlatform,omitempty"`
	ServerVersion *string `json:"serverVersion,omitempty"`
}

package taskresource

import (
	"encoding/json"
	"fmt"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +var _ ConnectionInfo = SqlConnectionInfo{} + +type SqlConnectionInfo struct { + AdditionalSettings *string `json:"additionalSettings,omitempty"` + Authentication *AuthenticationType `json:"authentication,omitempty"` + DataSource string `json:"dataSource"` + EncryptConnection *bool `json:"encryptConnection,omitempty"` + Platform *SqlSourcePlatform `json:"platform,omitempty"` + Port *int64 `json:"port,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` + ServerBrandVersion *string `json:"serverBrandVersion,omitempty"` + ServerName *string `json:"serverName,omitempty"` + ServerVersion *string `json:"serverVersion,omitempty"` + TrustServerCertificate *bool `json:"trustServerCertificate,omitempty"` + + // Fields inherited from ConnectionInfo + + Password *string `json:"password,omitempty"` + Type string `json:"type"` + UserName *string `json:"userName,omitempty"` +} + +func (s SqlConnectionInfo) ConnectionInfo() BaseConnectionInfoImpl { + return BaseConnectionInfoImpl{ + Password: s.Password, + Type: s.Type, + UserName: s.UserName, + } +} + +var _ json.Marshaler = SqlConnectionInfo{} + +func (s SqlConnectionInfo) MarshalJSON() ([]byte, error) { + type wrapper SqlConnectionInfo + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling SqlConnectionInfo: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling SqlConnectionInfo: %+v", err) + } + + decoded["type"] = "SqlConnectionInfo" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling SqlConnectionInfo: %+v", err) + } + + return encoded, nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_sqlserversqlmisynctaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_sqlserversqlmisynctaskinput.go new file mode 100644 index 00000000000..c0a52130e67 --- /dev/null +++ 
b/resource-manager/datamigration/2025-06-30/taskresource/model_sqlserversqlmisynctaskinput.go @@ -0,0 +1,13 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SqlServerSqlMISyncTaskInput struct { + AzureApp AzureActiveDirectoryApp `json:"azureApp"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + StorageResourceId string `json:"storageResourceId"` + TargetConnectionInfo MiSqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_ssismigrationinfo.go b/resource-manager/datamigration/2025-06-30/taskresource/model_ssismigrationinfo.go new file mode 100644 index 00000000000..e6beeebec91 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_ssismigrationinfo.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SsisMigrationInfo struct { + EnvironmentOverwriteOption *SsisMigrationOverwriteOption `json:"environmentOverwriteOption,omitempty"` + ProjectOverwriteOption *SsisMigrationOverwriteOption `json:"projectOverwriteOption,omitempty"` + SsisStoreType *SsisStoreType `json:"ssisStoreType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_startmigrationscenarioserverroleresult.go b/resource-manager/datamigration/2025-06-30/taskresource/model_startmigrationscenarioserverroleresult.go new file mode 100644 index 00000000000..d77a89b3a07 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_startmigrationscenarioserverroleresult.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type StartMigrationScenarioServerRoleResult struct { + ExceptionsAndWarnings *[]ReportableException `json:"exceptionsAndWarnings,omitempty"` + Name *string `json:"name,omitempty"` + State *MigrationState `json:"state,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_syncmigrationdatabaseerrorevent.go b/resource-manager/datamigration/2025-06-30/taskresource/model_syncmigrationdatabaseerrorevent.go new file mode 100644 index 00000000000..c67bec7e070 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_syncmigrationdatabaseerrorevent.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SyncMigrationDatabaseErrorEvent struct { + EventText *string `json:"eventText,omitempty"` + EventTypeString *string `json:"eventTypeString,omitempty"` + TimestampString *string `json:"timestampString,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqldbsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqldbsynctaskproperties.go new file mode 100644 index 00000000000..8d52449d6c4 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqldbsynctaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlDbSyncTaskProperties struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) MarshalJSON() ([]byte, 
error) { + type wrapper ValidateMigrationInputSqlServerSqlDbSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.SqlDb.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlDbSyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlDbSyncTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *ValidateSyncMigrationInputSqlServerTaskInput `json:"input,omitempty"` + Output *[]ValidateSyncMigrationInputSqlServerTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlDbSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling 
Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlDbSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmisynctaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmisynctaskoutput.go new file mode 100644 index 00000000000..4ace5ea7dbc --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmisynctaskoutput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMISyncTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmisynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmisynctaskproperties.go new file mode 100644 index 00000000000..5f243dc7314 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmisynctaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +type ValidateMigrationInputSqlServerSqlMISyncTaskProperties struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMISyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMISyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI.Sync.LRS" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMISyncTaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMISyncTaskProperties) 
UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *SqlServerSqlMISyncTaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMISyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMISyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMISyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmitaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmitaskinput.go new file mode 100644 index 00000000000..3679ddf26eb --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmitaskinput.go @@ -0,0 +1,14 @@ +package taskresource + +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateMigrationInputSqlServerSqlMITaskInput struct { + BackupBlobShare BlobShare `json:"backupBlobShare"` + BackupFileShare *FileShare `json:"backupFileShare,omitempty"` + BackupMode *BackupMode `json:"backupMode,omitempty"` + SelectedDatabases []MigrateSqlServerSqlMIDatabaseInput `json:"selectedDatabases"` + SelectedLogins *[]string `json:"selectedLogins,omitempty"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmitaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmitaskoutput.go new file mode 100644 index 00000000000..3ce9bbb8d9b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmitaskoutput.go @@ -0,0 +1,15 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateMigrationInputSqlServerSqlMITaskOutput struct { + BackupFolderErrors *[]ReportableException `json:"backupFolderErrors,omitempty"` + BackupShareCredentialsErrors *[]ReportableException `json:"backupShareCredentialsErrors,omitempty"` + BackupStorageAccountErrors *[]ReportableException `json:"backupStorageAccountErrors,omitempty"` + DatabaseBackupInfo *DatabaseBackupInfo `json:"databaseBackupInfo,omitempty"` + ExistingBackupErrors *[]ReportableException `json:"existingBackupErrors,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + RestoreDatabaseNameErrors *[]ReportableException `json:"restoreDatabaseNameErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmitaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmitaskproperties.go new file mode 100644 index 00000000000..ae438ca06ef --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemigrationinputsqlserversqlmitaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +type ValidateMigrationInputSqlServerSqlMITaskProperties struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s ValidateMigrationInputSqlServerSqlMITaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMigrationInputSqlServerSqlMITaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + decoded["taskType"] = "ValidateMigrationInput.SqlServer.AzureSqlDbMI" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMigrationInputSqlServerSqlMITaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMigrationInputSqlServerSqlMITaskProperties{} + +func (s *ValidateMigrationInputSqlServerSqlMITaskProperties) UnmarshalJSON(bytes []byte) error { + var 
decoded struct { + Input *ValidateMigrationInputSqlServerSqlMITaskInput `json:"input,omitempty"` + Output *[]ValidateMigrationInputSqlServerSqlMITaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMigrationInputSqlServerSqlMITaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMigrationInputSqlServerSqlMITaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_validatemongodbtaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemongodbtaskproperties.go new file mode 100644 index 00000000000..d260bc29ade --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_validatemongodbtaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ ProjectTaskProperties = ValidateMongoDbTaskProperties{} + +type ValidateMongoDbTaskProperties struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateMongoDbTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateMongoDbTaskProperties{} + +func (s ValidateMongoDbTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateMongoDbTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.MongoDb" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateMongoDbTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateMongoDbTaskProperties{} + +func (s *ValidateMongoDbTaskProperties) UnmarshalJSON(bytes []byte) error { + var decoded struct { + Input *MongoDbMigrationSettings `json:"input,omitempty"` + Output *[]MongoDbMigrationProgress `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors 
*[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateMongoDbTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateMongoDbTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_validateoracleazuredbforpostgresqlsynctaskproperties.go b/resource-manager/datamigration/2025-06-30/taskresource/model_validateoracleazuredbforpostgresqlsynctaskproperties.go new file mode 100644 index 00000000000..aa09891e139 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_validateoracleazuredbforpostgresqlsynctaskproperties.go @@ -0,0 +1,106 @@ +package taskresource + +import ( + "encoding/json" + "fmt" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ ProjectTaskProperties = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +type ValidateOracleAzureDbForPostgreSqlSyncTaskProperties struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + + // Fields inherited from ProjectTaskProperties + + ClientData *map[string]string `json:"clientData,omitempty"` + Commands *[]CommandProperties `json:"commands,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` +} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) ProjectTaskProperties() BaseProjectTaskPropertiesImpl { + return BaseProjectTaskPropertiesImpl{ + ClientData: s.ClientData, + Commands: s.Commands, + Errors: s.Errors, + State: s.State, + TaskType: s.TaskType, + } +} + +var _ json.Marshaler = ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) MarshalJSON() ([]byte, error) { + type wrapper ValidateOracleAzureDbForPostgreSqlSyncTaskProperties + wrapped := wrapper(s) + encoded, err := json.Marshal(wrapped) + if err != nil { + return nil, fmt.Errorf("marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + var decoded map[string]interface{} + if err = json.Unmarshal(encoded, &decoded); err != nil { + return nil, fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + decoded["taskType"] = "Validate.Oracle.AzureDbPostgreSql.Sync" + + encoded, err = json.Marshal(decoded) + if err != nil { + return nil, fmt.Errorf("re-marshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties: %+v", err) + } + + return encoded, nil +} + +var _ json.Unmarshaler = &ValidateOracleAzureDbForPostgreSqlSyncTaskProperties{} + +func (s *ValidateOracleAzureDbForPostgreSqlSyncTaskProperties) UnmarshalJSON(bytes []byte) error { 
+ var decoded struct { + Input *MigrateOracleAzureDbPostgreSqlSyncTaskInput `json:"input,omitempty"` + Output *[]ValidateOracleAzureDbPostgreSqlSyncTaskOutput `json:"output,omitempty"` + ClientData *map[string]string `json:"clientData,omitempty"` + Errors *[]ODataError `json:"errors,omitempty"` + State *TaskState `json:"state,omitempty"` + TaskType TaskType `json:"taskType"` + } + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + + s.Input = decoded.Input + s.Output = decoded.Output + s.ClientData = decoded.ClientData + s.Errors = decoded.Errors + s.State = decoded.State + s.TaskType = decoded.TaskType + + var temp map[string]json.RawMessage + if err := json.Unmarshal(bytes, &temp); err != nil { + return fmt.Errorf("unmarshaling ValidateOracleAzureDbForPostgreSqlSyncTaskProperties into map[string]json.RawMessage: %+v", err) + } + + if v, ok := temp["commands"]; ok { + var listTemp []json.RawMessage + if err := json.Unmarshal(v, &listTemp); err != nil { + return fmt.Errorf("unmarshaling Commands into list []json.RawMessage: %+v", err) + } + + output := make([]CommandProperties, 0) + for i, val := range listTemp { + impl, err := UnmarshalCommandPropertiesImplementation(val) + if err != nil { + return fmt.Errorf("unmarshaling index %d field 'Commands' for 'ValidateOracleAzureDbForPostgreSqlSyncTaskProperties': %+v", i, err) + } + output = append(output, impl) + } + s.Commands = &output + } + + return nil +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_validateoracleazuredbpostgresqlsynctaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_validateoracleazuredbpostgresqlsynctaskoutput.go new file mode 100644 index 00000000000..c92b765c870 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_validateoracleazuredbpostgresqlsynctaskoutput.go @@ -0,0 +1,8 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateOracleAzureDbPostgreSqlSyncTaskOutput struct { + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_validatesyncmigrationinputsqlservertaskinput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_validatesyncmigrationinputsqlservertaskinput.go new file mode 100644 index 00000000000..58f491be4c1 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_validatesyncmigrationinputsqlservertaskinput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidateSyncMigrationInputSqlServerTaskInput struct { + SelectedDatabases []MigrateSqlServerSqlDbSyncDatabaseInput `json:"selectedDatabases"` + SourceConnectionInfo SqlConnectionInfo `json:"sourceConnectionInfo"` + TargetConnectionInfo SqlConnectionInfo `json:"targetConnectionInfo"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_validatesyncmigrationinputsqlservertaskoutput.go b/resource-manager/datamigration/2025-06-30/taskresource/model_validatesyncmigrationinputsqlservertaskoutput.go new file mode 100644 index 00000000000..32bb29910ee --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_validatesyncmigrationinputsqlservertaskoutput.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ValidateSyncMigrationInputSqlServerTaskOutput struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + ValidationErrors *[]ReportableException `json:"validationErrors,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_validationerror.go b/resource-manager/datamigration/2025-06-30/taskresource/model_validationerror.go new file mode 100644 index 00000000000..3ed9aa47b1b --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_validationerror.go @@ -0,0 +1,9 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ValidationError struct { + Severity *Severity `json:"severity,omitempty"` + Text *string `json:"text,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/model_waitstatistics.go b/resource-manager/datamigration/2025-06-30/taskresource/model_waitstatistics.go new file mode 100644 index 00000000000..e3a8eaec0f9 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/model_waitstatistics.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type WaitStatistics struct { + WaitCount *int64 `json:"waitCount,omitempty"` + WaitTimeMs *float64 `json:"waitTimeMs,omitempty"` + WaitType *string `json:"waitType,omitempty"` +} diff --git a/resource-manager/datamigration/2025-06-30/taskresource/version.go b/resource-manager/datamigration/2025-06-30/taskresource/version.go new file mode 100644 index 00000000000..b1edecc9059 --- /dev/null +++ b/resource-manager/datamigration/2025-06-30/taskresource/version.go @@ -0,0 +1,10 @@ +package taskresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-30" + +func userAgent() string { + return "hashicorp/go-azure-sdk/taskresource/2025-06-30" +} diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/README.md b/resource-manager/netapp/2025-06-01/backuppolicy/README.md new file mode 100644 index 00000000000..7de92a97bcf --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backuppolicy/README.md @@ -0,0 +1,98 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/backuppolicy` Documentation + +The `backuppolicy` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/backuppolicy" +``` + + +### Client Initialization + +```go +client := backuppolicy.NewBackupPolicyClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `BackupPolicyClient.BackupPoliciesCreate` + +```go +ctx := context.TODO() +id := backuppolicy.NewBackupPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupPolicyName") + +payload := backuppolicy.BackupPolicy{ + // ... 
+} + + +if err := client.BackupPoliciesCreateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `BackupPolicyClient.BackupPoliciesDelete` + +```go +ctx := context.TODO() +id := backuppolicy.NewBackupPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupPolicyName") + +if err := client.BackupPoliciesDeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `BackupPolicyClient.BackupPoliciesGet` + +```go +ctx := context.TODO() +id := backuppolicy.NewBackupPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupPolicyName") + +read, err := client.BackupPoliciesGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `BackupPolicyClient.BackupPoliciesList` + +```go +ctx := context.TODO() +id := backuppolicy.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +read, err := client.BackupPoliciesList(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `BackupPolicyClient.BackupPoliciesUpdate` + +```go +ctx := context.TODO() +id := backuppolicy.NewBackupPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupPolicyName") + +payload := backuppolicy.BackupPolicyPatch{ + // ... 
+} + + +if err := client.BackupPoliciesUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/client.go b/resource-manager/netapp/2025-06-01/backuppolicy/client.go new file mode 100644 index 00000000000..8b4fc07ad66 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backuppolicy/client.go @@ -0,0 +1,26 @@ +package backuppolicy + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupPolicyClient struct { + Client *resourcemanager.Client +} + +func NewBackupPolicyClientWithBaseURI(sdkApi sdkEnv.Api) (*BackupPolicyClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "backuppolicy", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating BackupPolicyClient: %+v", err) + } + + return &BackupPolicyClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/id_backuppolicy.go b/resource-manager/netapp/2025-06-01/backuppolicy/id_backuppolicy.go new file mode 100644 index 00000000000..7e2cd95f210 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backuppolicy/id_backuppolicy.go @@ -0,0 +1,139 @@ +package backuppolicy + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&BackupPolicyId{}) +} + +var _ resourceids.ResourceId = &BackupPolicyId{} + +// BackupPolicyId is a struct representing the Resource ID for a Backup Policy +type BackupPolicyId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + BackupPolicyName string +} + +// NewBackupPolicyID returns a new BackupPolicyId struct +func NewBackupPolicyID(subscriptionId string, resourceGroupName string, netAppAccountName string, backupPolicyName string) BackupPolicyId { + return BackupPolicyId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + BackupPolicyName: backupPolicyName, + } +} + +// ParseBackupPolicyID parses 'input' into a BackupPolicyId +func ParseBackupPolicyID(input string) (*BackupPolicyId, error) { + parser := resourceids.NewParserFromResourceIdType(&BackupPolicyId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BackupPolicyId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseBackupPolicyIDInsensitively parses 'input' case-insensitively into a BackupPolicyId +// note: this method should only be used for API response data and not user input +func ParseBackupPolicyIDInsensitively(input string) (*BackupPolicyId, error) { + parser := resourceids.NewParserFromResourceIdType(&BackupPolicyId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BackupPolicyId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *BackupPolicyId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + 
if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.BackupPolicyName, ok = input.Parsed["backupPolicyName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "backupPolicyName", input) + } + + return nil +} + +// ValidateBackupPolicyID checks that 'input' can be parsed as a Backup Policy ID +func ValidateBackupPolicyID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseBackupPolicyID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Backup Policy ID +func (id BackupPolicyId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/backupPolicies/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.BackupPolicyName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Backup Policy ID +func (id BackupPolicyId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", 
"netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticBackupPolicies", "backupPolicies", "backupPolicies"), + resourceids.UserSpecifiedSegment("backupPolicyName", "backupPolicyName"), + } +} + +// String returns a human-readable description of this Backup Policy ID +func (id BackupPolicyId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Backup Policy Name: %q", id.BackupPolicyName), + } + return fmt.Sprintf("Backup Policy (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/id_backuppolicy_test.go b/resource-manager/netapp/2025-06-01/backuppolicy/id_backuppolicy_test.go new file mode 100644 index 00000000000..39877320168 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backuppolicy/id_backuppolicy_test.go @@ -0,0 +1,327 @@ +package backuppolicy + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &BackupPolicyId{} + +func TestNewBackupPolicyID(t *testing.T) { + id := NewBackupPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupPolicyName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.BackupPolicyName != "backupPolicyName" { + t.Fatalf("Expected %q but got %q for Segment 'BackupPolicyName'", id.BackupPolicyName, "backupPolicyName") + } +} + +func TestFormatBackupPolicyID(t *testing.T) { + actual := NewBackupPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupPolicyName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupPolicies/backupPolicyName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseBackupPolicyID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *BackupPolicyId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupPolicies", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupPolicies/backupPolicyName", + Expected: &BackupPolicyId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + BackupPolicyName: "backupPolicyName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupPolicies/backupPolicyName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseBackupPolicyID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + 
if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.BackupPolicyName != v.Expected.BackupPolicyName { + t.Fatalf("Expected %q but got %q for BackupPolicyName", v.Expected.BackupPolicyName, actual.BackupPolicyName) + } + + } +} + +func TestParseBackupPolicyIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *BackupPolicyId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupPolicies", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) 
+ Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPpOlIcIeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupPolicies/backupPolicyName", + Expected: &BackupPolicyId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + BackupPolicyName: "backupPolicyName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupPolicies/backupPolicyName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPpOlIcIeS/bAcKuPpOlIcYnAmE", + Expected: &BackupPolicyId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + BackupPolicyName: "bAcKuPpOlIcYnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPpOlIcIeS/bAcKuPpOlIcYnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseBackupPolicyIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + 
} + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.BackupPolicyName != v.Expected.BackupPolicyName { + t.Fatalf("Expected %q but got %q for BackupPolicyName", v.Expected.BackupPolicyName, actual.BackupPolicyName) + } + + } +} + +func TestSegmentsForBackupPolicyId(t *testing.T) { + segments := BackupPolicyId{}.Segments() + if len(segments) == 0 { + t.Fatalf("BackupPolicyId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/id_netappaccount.go b/resource-manager/netapp/2025-06-01/backuppolicy/id_netappaccount.go new file mode 100644 index 00000000000..b10e89e1ad1 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backuppolicy/id_netappaccount.go @@ -0,0 +1,130 @@ +package backuppolicy + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&NetAppAccountId{}) +} + +var _ resourceids.ResourceId = &NetAppAccountId{} + +// NetAppAccountId is a struct representing the Resource ID for a Net App Account +type NetAppAccountId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string +} + +// NewNetAppAccountID returns a new NetAppAccountId struct +func NewNetAppAccountID(subscriptionId string, resourceGroupName string, netAppAccountName string) NetAppAccountId { + return NetAppAccountId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + } +} + +// ParseNetAppAccountID parses 'input' into a NetAppAccountId +func ParseNetAppAccountID(input string) (*NetAppAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseNetAppAccountIDInsensitively parses 'input' case-insensitively into a NetAppAccountId +// note: this method should only be used for API response data and not user input +func ParseNetAppAccountIDInsensitively(input string) (*NetAppAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *NetAppAccountId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; 
!ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + return nil +} + +// ValidateNetAppAccountID checks that 'input' can be parsed as a Net App Account ID +func ValidateNetAppAccountID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseNetAppAccountID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Net App Account ID +func (id NetAppAccountId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Net App Account ID +func (id NetAppAccountId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + } +} + +// String returns a human-readable description of this Net App Account ID +func (id NetAppAccountId) String() string { + components 
:= []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + } + return fmt.Sprintf("Net App Account (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/id_netappaccount_test.go b/resource-manager/netapp/2025-06-01/backuppolicy/id_netappaccount_test.go new file mode 100644 index 00000000000..455b109bb39 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backuppolicy/id_netappaccount_test.go @@ -0,0 +1,282 @@ +package backuppolicy + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &NetAppAccountId{} + +func TestNewNetAppAccountID(t *testing.T) { + id := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } +} + +func TestFormatNetAppAccountID(t *testing.T) { + actual := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName" + if actual 
!= expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseNetAppAccountID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountID(v.Input) + if err != nil 
{ + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestParseNetAppAccountIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Expected: &NetAppAccountId{ + SubscriptionId: 
"12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestSegmentsForNetAppAccountId(t *testing.T) { + segments := NetAppAccountId{}.Segments() + if len(segments) == 0 { + t.Fatalf("NetAppAccountId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppoliciescreate.go b/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppoliciescreate.go new file mode 100644 index 00000000000..bb886f44754 --- /dev/null 
+++ b/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppoliciescreate.go
@@ -0,0 +1,76 @@
+package backuppolicy
+
+import (
+    "context"
+    "fmt"
+    "net/http"
+
+    "github.com/hashicorp/go-azure-sdk/sdk/client"
+    "github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
+    "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
+    "github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// BackupPoliciesCreateOperationResponse carries the result of a Create call:
+// the in-flight Poller, the raw HTTP response, any OData metadata and (when
+// the service responds synchronously) the created BackupPolicy.
+type BackupPoliciesCreateOperationResponse struct {
+    Poller       pollers.Poller
+    HttpResponse *http.Response
+    OData        *odata.OData
+    Model        *BackupPolicy
+}
+
+// BackupPoliciesCreate ...
+// Issues a PUT creating (or replacing) the Backup Policy identified by id.
+// The operation is long-running: poll via result.Poller, or use
+// BackupPoliciesCreateThenPoll.
+func (c BackupPolicyClient) BackupPoliciesCreate(ctx context.Context, id BackupPolicyId, input BackupPolicy) (result BackupPoliciesCreateOperationResponse, err error) {
+    opts := client.RequestOptions{
+        ContentType: "application/json; charset=utf-8",
+        ExpectedStatusCodes: []int{
+            http.StatusAccepted,
+            http.StatusCreated,
+            http.StatusOK,
+        },
+        HttpMethod: http.MethodPut,
+        Path:       id.ID(),
+    }
+
+    req, err := c.Client.NewRequest(ctx, opts)
+    if err != nil {
+        return
+    }
+
+    if err = req.Marshal(input); err != nil {
+        return
+    }
+
+    var resp *client.Response
+    resp, err = req.Execute(ctx)
+    // the response (and OData) are surfaced even when Execute errors, so
+    // callers can inspect the failed response
+    if resp != nil {
+        result.OData = resp.OData
+        result.HttpResponse = resp.Response
+    }
+    if err != nil {
+        return
+    }
+
+    result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client)
+    if err != nil {
+        return
+    }
+
+    return
+}
+
+// BackupPoliciesCreateThenPoll performs BackupPoliciesCreate then polls until it's completed
+func (c BackupPolicyClient) BackupPoliciesCreateThenPoll(ctx context.Context, id BackupPolicyId, input BackupPolicy) error {
+    result, err := c.BackupPoliciesCreate(ctx, id, input)
+    if err != nil {
+        return fmt.Errorf("performing BackupPoliciesCreate: %+v", err)
+    }
+
+    if err := result.Poller.PollUntilDone(ctx); err != nil {
+        return fmt.Errorf("polling after BackupPoliciesCreate: %+v", err)
+    }
+
+    return nil
+}
diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppoliciesdelete.go b/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppoliciesdelete.go
new file mode 100644
index 00000000000..482cdc1780e
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppoliciesdelete.go
@@ -0,0 +1,71 @@
+package backuppolicy
+
+import (
+    "context"
+    "fmt"
+    "net/http"
+
+    "github.com/hashicorp/go-azure-sdk/sdk/client"
+    "github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
+    "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
+    "github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// BackupPoliciesDeleteOperationResponse carries the result of a Delete
+// call; deletion is long-running, so Poller tracks completion.
+type BackupPoliciesDeleteOperationResponse struct {
+    Poller       pollers.Poller
+    HttpResponse *http.Response
+    OData        *odata.OData
+}
+
+// BackupPoliciesDelete ...
+// Issues a DELETE for the Backup Policy identified by id; poll via the
+// returned Poller, or use BackupPoliciesDeleteThenPoll.
+func (c BackupPolicyClient) BackupPoliciesDelete(ctx context.Context, id BackupPolicyId) (result BackupPoliciesDeleteOperationResponse, err error) {
+    opts := client.RequestOptions{
+        ContentType: "application/json; charset=utf-8",
+        ExpectedStatusCodes: []int{
+            http.StatusAccepted,
+            http.StatusNoContent,
+            http.StatusOK,
+        },
+        HttpMethod: http.MethodDelete,
+        Path:       id.ID(),
+    }
+
+    req, err := c.Client.NewRequest(ctx, opts)
+    if err != nil {
+        return
+    }
+
+    var resp *client.Response
+    resp, err = req.Execute(ctx)
+    // surface the response even on error so callers can inspect it
+    if resp != nil {
+        result.OData = resp.OData
+        result.HttpResponse = resp.Response
+    }
+    if err != nil {
+        return
+    }
+
+    result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client)
+    if err != nil {
+        return
+    }
+
+    return
+}
+
+// BackupPoliciesDeleteThenPoll performs BackupPoliciesDelete then polls until it's completed
+func (c BackupPolicyClient) BackupPoliciesDeleteThenPoll(ctx context.Context, id BackupPolicyId) error {
+    result, err := c.BackupPoliciesDelete(ctx, id)
+    if err != nil {
+        return fmt.Errorf("performing BackupPoliciesDelete: %+v", err)
+    }
+
+    if err := result.Poller.PollUntilDone(ctx); err != nil {
+        return fmt.Errorf("polling after BackupPoliciesDelete: %+v", err)
+    }
+
+    return nil
+}
diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppoliciesget.go b/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppoliciesget.go
new file mode 100644
index 00000000000..e8ecfc1fc04
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppoliciesget.go
@@ -0,0 +1,53 @@
+package backuppolicy
+
+import (
+    "context"
+    "net/http"
+
+    "github.com/hashicorp/go-azure-sdk/sdk/client"
+    "github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// BackupPoliciesGetOperationResponse carries the result of a Get call:
+// the raw HTTP response, any OData metadata and the decoded BackupPolicy.
+type BackupPoliciesGetOperationResponse struct {
+    HttpResponse *http.Response
+    OData        *odata.OData
+    Model        *BackupPolicy
+}
+
+// BackupPoliciesGet ...
+// Retrieves the Backup Policy identified by id via a GET request and
+// unmarshals the response body into result.Model.
+func (c BackupPolicyClient) BackupPoliciesGet(ctx context.Context, id BackupPolicyId) (result BackupPoliciesGetOperationResponse, err error) {
+    opts := client.RequestOptions{
+        ContentType: "application/json; charset=utf-8",
+        ExpectedStatusCodes: []int{
+            http.StatusOK,
+        },
+        HttpMethod: http.MethodGet,
+        Path:       id.ID(),
+    }
+
+    req, err := c.Client.NewRequest(ctx, opts)
+    if err != nil {
+        return
+    }
+
+    var resp *client.Response
+    resp, err = req.Execute(ctx)
+    // surface the response even on error so callers can inspect it
+    if resp != nil {
+        result.OData = resp.OData
+        result.HttpResponse = resp.Response
+    }
+    if err != nil {
+        return
+    }
+
+    var model BackupPolicy
+    result.Model = &model
+    if err = resp.Unmarshal(result.Model); err != nil {
+        return
+    }
+
+    return
+}
diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppolicieslist.go b/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppolicieslist.go
new file mode 100644
index 00000000000..7932d964240
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppolicieslist.go
@@ -0,0 +1,54 @@
+package backuppolicy
+
+import (
+    "context"
+    "fmt"
+    "net/http"
+
+    "github.com/hashicorp/go-azure-sdk/sdk/client"
+    "github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// BackupPoliciesListOperationResponse carries the result of a List call:
+// the raw HTTP response, any OData metadata and the decoded collection.
+type BackupPoliciesListOperationResponse struct {
+    HttpResponse *http.Response
+    OData        *odata.OData
+    Model        *BackupPoliciesList
+}
+
+// BackupPoliciesList ...
+// Lists the Backup Policies under the NetApp account identified by id.
+func (c BackupPolicyClient) BackupPoliciesList(ctx context.Context, id NetAppAccountId) (result BackupPoliciesListOperationResponse, err error) {
+    opts := client.RequestOptions{
+        ContentType: "application/json; charset=utf-8",
+        ExpectedStatusCodes: []int{
+            http.StatusOK,
+        },
+        HttpMethod: http.MethodGet,
+        // List operates on the parent account, so the collection path is
+        // appended to the account's resource ID
+        Path: fmt.Sprintf("%s/backupPolicies", id.ID()),
+    }
+
+    req, err := c.Client.NewRequest(ctx, opts)
+    if err != nil {
+        return
+    }
+
+    var resp *client.Response
+    resp, err = req.Execute(ctx)
+    // surface the response even on error so callers can inspect it
+    if resp != nil {
+        result.OData = resp.OData
+        result.HttpResponse = resp.Response
+    }
+    if err != nil {
+        return
+    }
+
+    var model BackupPoliciesList
+    result.Model = &model
+    if err = resp.Unmarshal(result.Model); err != nil {
+        return
+    }
+
+    return
+}
diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppoliciesupdate.go b/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppoliciesupdate.go
new file mode 100644
index 00000000000..b24e23a21cf
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backuppolicy/method_backuppoliciesupdate.go
@@ -0,0 +1,75 @@
+package backuppolicy
+
+import (
+    "context"
+    "fmt"
+    "net/http"
+
+    "github.com/hashicorp/go-azure-sdk/sdk/client"
+    "github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
+    "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
+    "github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// BackupPoliciesUpdateOperationResponse carries the result of an Update
+// call; the PATCH is long-running, so Poller tracks completion.
+type BackupPoliciesUpdateOperationResponse struct {
+    Poller       pollers.Poller
+    HttpResponse *http.Response
+    OData        *odata.OData
+    Model        *BackupPolicy
+}
+
+// BackupPoliciesUpdate ...
+// Applies a partial update (PATCH) to the Backup Policy identified by id;
+// poll via the returned Poller, or use BackupPoliciesUpdateThenPoll.
+func (c BackupPolicyClient) BackupPoliciesUpdate(ctx context.Context, id BackupPolicyId, input BackupPolicyPatch) (result BackupPoliciesUpdateOperationResponse, err error) {
+    opts := client.RequestOptions{
+        ContentType: "application/json; charset=utf-8",
+        ExpectedStatusCodes: []int{
+            http.StatusAccepted,
+            http.StatusOK,
+        },
+        HttpMethod: http.MethodPatch,
+        Path:       id.ID(),
+    }
+
+    req, err := c.Client.NewRequest(ctx, opts)
+    if err != nil {
+        return
+    }
+
+    if err = req.Marshal(input); err != nil {
+        return
+    }
+
+    var resp *client.Response
+    resp, err = req.Execute(ctx)
+    // surface the response even on error so callers can inspect it
+    if resp != nil {
+        result.OData = resp.OData
+        result.HttpResponse = resp.Response
+    }
+    if err != nil {
+        return
+    }
+
+    result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client)
+    if err != nil {
+        return
+    }
+
+    return
+}
+
+// BackupPoliciesUpdateThenPoll performs BackupPoliciesUpdate then polls until it's completed
+func (c BackupPolicyClient) BackupPoliciesUpdateThenPoll(ctx context.Context, id BackupPolicyId, input BackupPolicyPatch) error {
+    result, err := c.BackupPoliciesUpdate(ctx, id, input)
+    if err != nil {
+        return fmt.Errorf("performing BackupPoliciesUpdate: %+v", err)
+    }
+
+    if err := result.Poller.PollUntilDone(ctx); err != nil {
+        return fmt.Errorf("polling after BackupPoliciesUpdate: %+v", err)
+    }
+
+    return nil
+}
diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/model_backuppolicieslist.go b/resource-manager/netapp/2025-06-01/backuppolicy/model_backuppolicieslist.go
new file mode 100644
index 00000000000..3db3d131a60
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backuppolicy/model_backuppolicieslist.go
@@ -0,0 +1,8 @@
+package backuppolicy
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// BackupPoliciesList is the response envelope for listing Backup Policies;
+// Value holds the returned policies.
+type BackupPoliciesList struct {
+    Value *[]BackupPolicy `json:"value,omitempty"`
+}
diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/model_backuppolicy.go b/resource-manager/netapp/2025-06-01/backuppolicy/model_backuppolicy.go
new file mode 100644
index 00000000000..747ab65be24
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backuppolicy/model_backuppolicy.go
@@ -0,0 +1,19 @@
+package backuppolicy
+
+import (
+    "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// BackupPolicy models the Microsoft.NetApp backupPolicies ARM resource.
+// Location and Properties are non-pointer and lack omitempty, so they are
+// always serialized; the remaining fields are optional / server-populated.
+type BackupPolicy struct {
+    Etag       *string                `json:"etag,omitempty"`
+    Id         *string                `json:"id,omitempty"`
+    Location   string                 `json:"location"`
+    Name       *string                `json:"name,omitempty"`
+    Properties BackupPolicyProperties `json:"properties"`
+    SystemData *systemdata.SystemData `json:"systemData,omitempty"`
+    Tags       *map[string]string     `json:"tags,omitempty"`
+    Type       *string                `json:"type,omitempty"`
+}
diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/model_backuppolicypatch.go b/resource-manager/netapp/2025-06-01/backuppolicy/model_backuppolicypatch.go
new file mode 100644
index 00000000000..bf7eecb1358
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backuppolicy/model_backuppolicypatch.go
@@ -0,0 +1,13 @@
+package backuppolicy
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// BackupPolicyPatch is the partial-update (PATCH) payload for a Backup
+// Policy; every field is optional and only set fields are serialized.
+type BackupPolicyPatch struct {
+    Id         *string                 `json:"id,omitempty"`
+    Location   *string                 `json:"location,omitempty"`
+    Name       *string                 `json:"name,omitempty"`
+    Properties *BackupPolicyProperties `json:"properties,omitempty"`
+    Tags       *map[string]string      `json:"tags,omitempty"`
+    Type       *string                 `json:"type,omitempty"`
+}
diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/model_backuppolicyproperties.go b/resource-manager/netapp/2025-06-01/backuppolicy/model_backuppolicyproperties.go
new file mode 100644
index 00000000000..d5694f4797d
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backuppolicy/model_backuppolicyproperties.go
@@ -0,0 +1,15 @@
+package backuppolicy
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// BackupPolicyProperties holds the retention counts (daily/weekly/monthly
+// backups to keep) and Enabled flag, plus fields such as ProvisioningState,
+// BackupPolicyId, VolumeBackups and VolumesAssigned which are presumably
+// populated by the service on read — confirm against the API specification.
+type BackupPolicyProperties struct {
+    BackupPolicyId       *string          `json:"backupPolicyId,omitempty"`
+    DailyBackupsToKeep   *int64           `json:"dailyBackupsToKeep,omitempty"`
+    Enabled              *bool            `json:"enabled,omitempty"`
+    MonthlyBackupsToKeep *int64           `json:"monthlyBackupsToKeep,omitempty"`
+    ProvisioningState    *string          `json:"provisioningState,omitempty"`
+    VolumeBackups        *[]VolumeBackups `json:"volumeBackups,omitempty"`
+    VolumesAssigned      *int64           `json:"volumesAssigned,omitempty"`
+    WeeklyBackupsToKeep  *int64           `json:"weeklyBackupsToKeep,omitempty"`
+}
diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/model_volumebackups.go b/resource-manager/netapp/2025-06-01/backuppolicy/model_volumebackups.go
new file mode 100644
index 00000000000..b53afb9ae14
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backuppolicy/model_volumebackups.go
@@ -0,0 +1,11 @@
+package backuppolicy
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// VolumeBackups summarises, per volume, the backup state reported on a
+// Backup Policy (see BackupPolicyProperties.VolumeBackups).
+type VolumeBackups struct {
+    BackupsCount     *int64  `json:"backupsCount,omitempty"`
+    PolicyEnabled    *bool   `json:"policyEnabled,omitempty"`
+    VolumeName       *string `json:"volumeName,omitempty"`
+    VolumeResourceId *string `json:"volumeResourceId,omitempty"`
+}
diff --git a/resource-manager/netapp/2025-06-01/backuppolicy/version.go b/resource-manager/netapp/2025-06-01/backuppolicy/version.go
new file mode 100644
index 00000000000..314a9b2baff
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backuppolicy/version.go
@@ -0,0 +1,10 @@
+package backuppolicy
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// defaultApiVersion is the Azure API version this package targets.
+const defaultApiVersion = "2025-06-01"
+
+// userAgent returns the User-Agent fragment identifying this SDK package.
+func userAgent() string {
+    return "hashicorp/go-azure-sdk/backuppolicy/2025-06-01"
+}
diff --git a/resource-manager/netapp/2025-06-01/backups/README.md b/resource-manager/netapp/2025-06-01/backups/README.md
new file mode 100644
index 00000000000..7177518ac40
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backups/README.md
@@ -0,0 +1,166 @@
+
+## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/backups` Documentation
+
+The `backups` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`).
+
+This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs).
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/backups" +``` + + +### Client Initialization + +```go +client := backups.NewBackupsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `BackupsClient.Create` + +```go +ctx := context.TODO() +id := backups.NewBackupID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName", "backupName") + +payload := backups.Backup{ + // ... +} + + +if err := client.CreateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `BackupsClient.Delete` + +```go +ctx := context.TODO() +id := backups.NewBackupID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName", "backupName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `BackupsClient.Get` + +```go +ctx := context.TODO() +id := backups.NewBackupID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName", "backupName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `BackupsClient.GetLatestStatus` + +```go +ctx := context.TODO() +id := backups.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +read, err := client.GetLatestStatus(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `BackupsClient.ListByVault` + +```go +ctx := context.TODO() +id := backups.NewBackupVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", 
"backupVaultName") + +// alternatively `client.ListByVault(ctx, id, backups.DefaultListByVaultOperationOptions())` can be used to do batched pagination +items, err := client.ListByVaultComplete(ctx, id, backups.DefaultListByVaultOperationOptions()) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `BackupsClient.UnderAccountMigrateBackups` + +```go +ctx := context.TODO() +id := backups.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +payload := backups.BackupsMigrationRequest{ + // ... +} + + +if err := client.UnderAccountMigrateBackupsThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `BackupsClient.UnderBackupVaultRestoreFiles` + +```go +ctx := context.TODO() +id := backups.NewBackupID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName", "backupName") + +payload := backups.BackupRestoreFiles{ + // ... +} + + +if err := client.UnderBackupVaultRestoreFilesThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `BackupsClient.UnderVolumeMigrateBackups` + +```go +ctx := context.TODO() +id := backups.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +payload := backups.BackupsMigrationRequest{ + // ... +} + + +if err := client.UnderVolumeMigrateBackupsThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `BackupsClient.Update` + +```go +ctx := context.TODO() +id := backups.NewBackupID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName", "backupName") + +payload := backups.BackupPatch{ + // ... 
+}
+
+
+if err := client.UpdateThenPoll(ctx, id, payload); err != nil {
+    // handle the error
+}
+```
diff --git a/resource-manager/netapp/2025-06-01/backups/client.go b/resource-manager/netapp/2025-06-01/backups/client.go
new file mode 100644
index 00000000000..6e1cc70d434
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backups/client.go
@@ -0,0 +1,26 @@
+package backups
+
+import (
+    "fmt"
+
+    "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
+    sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// BackupsClient wraps the Resource Manager client used for the netapp
+// `backups` operations in this package.
+type BackupsClient struct {
+    Client *resourcemanager.Client
+}
+
+// NewBackupsClientWithBaseURI builds a BackupsClient for the given API
+// environment, pinned to this package's defaultApiVersion.
+func NewBackupsClientWithBaseURI(sdkApi sdkEnv.Api) (*BackupsClient, error) {
+    client, err := resourcemanager.NewClient(sdkApi, "backups", defaultApiVersion)
+    if err != nil {
+        return nil, fmt.Errorf("instantiating BackupsClient: %+v", err)
+    }
+
+    return &BackupsClient{
+        Client: client,
+    }, nil
+}
diff --git a/resource-manager/netapp/2025-06-01/backups/constants.go b/resource-manager/netapp/2025-06-01/backups/constants.go
new file mode 100644
index 00000000000..d3c86cd75c0
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backups/constants.go
@@ -0,0 +1,142 @@
+package backups
+
+import (
+    "encoding/json"
+    "fmt"
+    "strings"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// BackupType describes how a backup was created: Manual or Scheduled.
+type BackupType string
+
+const (
+    BackupTypeManual    BackupType = "Manual"
+    BackupTypeScheduled BackupType = "Scheduled"
+)
+
+// PossibleValuesForBackupType returns every known BackupType value.
+func PossibleValuesForBackupType() []string {
+    return []string{
+        string(BackupTypeManual),
+        string(BackupTypeScheduled),
+    }
+}
+
+// UnmarshalJSON decodes a JSON string into a BackupType, tolerating
+// values the SDK doesn't know about (see parseBackupType).
+func (s *BackupType) UnmarshalJSON(bytes []byte) error {
+    var decoded string
+    if err := json.Unmarshal(bytes, &decoded); err != nil {
+        return fmt.Errorf("unmarshaling: %+v", err)
+    }
+    out, err := parseBackupType(decoded)
+    if err != nil {
+        return fmt.Errorf("parsing %q: %+v", decoded, err)
+    }
+    *s = *out
+    return nil
+}
+
+// parseBackupType matches input case-insensitively against the known
+// values, passing unrecognised values through unchanged.
+func parseBackupType(input string) (*BackupType, error) {
+    vals := map[string]BackupType{
+        "manual":    BackupTypeManual,
+        "scheduled": BackupTypeScheduled,
+    }
+    if v, ok := vals[strings.ToLower(input)]; ok {
+        return &v, nil
+    }
+
+    // otherwise presume it's an undefined value and best-effort it
+    out := BackupType(input)
+    return &out, nil
+}
+
+// MirrorState is the replication mirror state reported by the service.
+type MirrorState string
+
+const (
+    MirrorStateBroken        MirrorState = "Broken"
+    MirrorStateMirrored      MirrorState = "Mirrored"
+    MirrorStateUninitialized MirrorState = "Uninitialized"
+)
+
+// PossibleValuesForMirrorState returns every known MirrorState value.
+func PossibleValuesForMirrorState() []string {
+    return []string{
+        string(MirrorStateBroken),
+        string(MirrorStateMirrored),
+        string(MirrorStateUninitialized),
+    }
+}
+
+// UnmarshalJSON decodes a JSON string into a MirrorState, tolerating
+// values the SDK doesn't know about (see parseMirrorState).
+func (s *MirrorState) UnmarshalJSON(bytes []byte) error {
+    var decoded string
+    if err := json.Unmarshal(bytes, &decoded); err != nil {
+        return fmt.Errorf("unmarshaling: %+v", err)
+    }
+    out, err := parseMirrorState(decoded)
+    if err != nil {
+        return fmt.Errorf("parsing %q: %+v", decoded, err)
+    }
+    *s = *out
+    return nil
+}
+
+// parseMirrorState matches input case-insensitively against the known
+// values, passing unrecognised values through unchanged.
+func parseMirrorState(input string) (*MirrorState, error) {
+    vals := map[string]MirrorState{
+        "broken":        MirrorStateBroken,
+        "mirrored":      MirrorStateMirrored,
+        "uninitialized": MirrorStateUninitialized,
+    }
+    if v, ok := vals[strings.ToLower(input)]; ok {
+        return &v, nil
+    }
+
+    // otherwise presume it's an undefined value and best-effort it
+    out := MirrorState(input)
+    return &out, nil
+}
+
+// RelationshipStatus is the replication relationship status reported by
+// the service.
+type RelationshipStatus string
+
+const (
+    RelationshipStatusFailed       RelationshipStatus = "Failed"
+    RelationshipStatusIdle         RelationshipStatus = "Idle"
+    RelationshipStatusTransferring RelationshipStatus = "Transferring"
+    RelationshipStatusUnknown      RelationshipStatus = "Unknown"
+)
+
+// PossibleValuesForRelationshipStatus returns every known RelationshipStatus value.
+func PossibleValuesForRelationshipStatus() []string {
+    return []string{
+        string(RelationshipStatusFailed),
+        string(RelationshipStatusIdle),
+        string(RelationshipStatusTransferring),
+        string(RelationshipStatusUnknown),
+    }
+}
+
+// UnmarshalJSON decodes a JSON string into a RelationshipStatus,
+// tolerating values the SDK doesn't know about (see parseRelationshipStatus).
+func (s *RelationshipStatus) UnmarshalJSON(bytes []byte) error {
+    var decoded string
+    if err := json.Unmarshal(bytes, &decoded); err != nil {
+        return fmt.Errorf("unmarshaling: %+v", err)
+    }
+    out, err := parseRelationshipStatus(decoded)
+    if err != nil {
+        return fmt.Errorf("parsing %q: %+v", decoded, err)
+    }
+    *s = *out
+    return nil
+}
+
+// parseRelationshipStatus matches input case-insensitively against the
+// known values, passing unrecognised values through unchanged.
+func parseRelationshipStatus(input string) (*RelationshipStatus, error) {
+    vals := map[string]RelationshipStatus{
+        "failed":       RelationshipStatusFailed,
+        "idle":         RelationshipStatusIdle,
+        "transferring": RelationshipStatusTransferring,
+        "unknown":      RelationshipStatusUnknown,
+    }
+    if v, ok := vals[strings.ToLower(input)]; ok {
+        return &v, nil
+    }
+
+    // otherwise presume it's an undefined value and best-effort it
+    out := RelationshipStatus(input)
+    return &out, nil
+}
diff --git a/resource-manager/netapp/2025-06-01/backups/id_backup.go b/resource-manager/netapp/2025-06-01/backups/id_backup.go
new file mode 100644
index 00000000000..06aacd5f5b5
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backups/id_backup.go
@@ -0,0 +1,148 @@
+package backups
+
+import (
+    "fmt"
+    "strings"
+
+    "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser"
+    "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+// register this Resource ID type with the shared recaser at package load
+func init() {
+    recaser.RegisterResourceId(&BackupId{})
+}
+
+// compile-time check that BackupId satisfies the ResourceId interface
+var _ resourceids.ResourceId = &BackupId{}
+
+// BackupId is a struct representing the Resource ID for a Backup
+type BackupId struct {
+    SubscriptionId    string
+    ResourceGroupName string
+    NetAppAccountName string
+    BackupVaultName   string
+    BackupName        string
+}
+
+// NewBackupID returns a new BackupId struct
+func NewBackupID(subscriptionId string, resourceGroupName string, netAppAccountName string, backupVaultName string, backupName string) BackupId {
+    return BackupId{
+        SubscriptionId:    subscriptionId,
+        ResourceGroupName: resourceGroupName,
+        NetAppAccountName: netAppAccountName,
+        BackupVaultName:   backupVaultName,
+        BackupName:        backupName,
+    }
+}
+
+// ParseBackupID parses 'input' into a BackupId
+func ParseBackupID(input string) (*BackupId, error) {
+    parser := resourceids.NewParserFromResourceIdType(&BackupId{})
+    parsed, err := parser.Parse(input, false)
+    if err != nil {
+        return nil, fmt.Errorf("parsing %q: %+v", input, err)
+    }
+
+    id := BackupId{}
+    if err = id.FromParseResult(*parsed); err != nil {
+        return nil, err
+    }
+
+    return &id, nil
+}
+
+// ParseBackupIDInsensitively parses 'input' case-insensitively into a BackupId
+// note: this method should only be used for API response data and not user input
+func ParseBackupIDInsensitively(input string) (*BackupId, error) {
+    parser := resourceids.NewParserFromResourceIdType(&BackupId{})
+    parsed, err := parser.Parse(input, true)
+    if err != nil {
+        return nil, fmt.Errorf("parsing %q: %+v", input, err)
+    }
+
+    id := BackupId{}
+    if err = id.FromParseResult(*parsed); err != nil {
+        return nil, err
+    }
+
+    return &id, nil
+}
+
+// FromParseResult populates the ID's fields from a parse result,
+// erroring on the first missing segment.
+func (id *BackupId) FromParseResult(input resourceids.ParseResult) error {
+    var ok bool
+
+    if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok {
+        return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input)
+    }
+
+    if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok {
+        return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input)
+    }
+
+    if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok {
+        return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input)
+    }
+
+    if id.BackupVaultName, ok = input.Parsed["backupVaultName"]; !ok {
+        return resourceids.NewSegmentNotSpecifiedError(id, "backupVaultName", input)
+    }
+
+    if id.BackupName, ok = input.Parsed["backupName"]; !ok {
+        return resourceids.NewSegmentNotSpecifiedError(id, "backupName", input)
+    }
+
+    return nil
+}
+
+// ValidateBackupID checks that 'input' can be parsed as a Backup ID
+func ValidateBackupID(input interface{}, key string) (warnings []string, errors []error) {
+    v, ok := input.(string)
+    if !ok {
+        errors = append(errors, fmt.Errorf("expected %q to be a string", key))
+        return
+    }
+
+    if _, err := ParseBackupID(v); err != nil {
+        errors = append(errors, err)
+    }
+
+    return
+}
+
+// ID returns the formatted Backup ID
+func (id BackupId) ID() string {
+    fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/backupVaults/%s/backups/%s"
+    return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.BackupVaultName, id.BackupName)
+}
+
+// Segments returns a slice of Resource ID Segments which comprise this Backup ID
+func (id BackupId) Segments() []resourceids.Segment {
+    return []resourceids.Segment{
+        resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"),
+        resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"),
+        resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"),
+        resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"),
+        resourceids.StaticSegment("staticProviders", "providers", "providers"),
+        resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"),
+        resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"),
+        resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"),
+        resourceids.StaticSegment("staticBackupVaults", "backupVaults", "backupVaults"),
+        resourceids.UserSpecifiedSegment("backupVaultName", "backupVaultName"),
+        resourceids.StaticSegment("staticBackups", "backups", "backups"),
+        resourceids.UserSpecifiedSegment("backupName", "backupName"),
+    }
+}
+
+// String returns a human-readable description of this Backup ID
+func (id BackupId) String() string {
+    components := []string{
+        fmt.Sprintf("Subscription: %q", id.SubscriptionId),
+        fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName),
+        fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName),
+        fmt.Sprintf("Backup Vault Name: %q", id.BackupVaultName),
+        fmt.Sprintf("Backup Name: %q", id.BackupName),
+    }
+    return fmt.Sprintf("Backup (%s)", strings.Join(components, "\n"))
+}
diff --git a/resource-manager/netapp/2025-06-01/backups/id_backup_test.go b/resource-manager/netapp/2025-06-01/backups/id_backup_test.go
new file mode 100644
index 00000000000..02c2ee418cc
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/backups/id_backup_test.go
@@ -0,0 +1,372 @@
+package backups
+
+import (
+    "testing"
+
+    "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +var _ resourceids.ResourceId = &BackupId{} + +func TestNewBackupID(t *testing.T) { + id := NewBackupID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName", "backupName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.BackupVaultName != "backupVaultName" { + t.Fatalf("Expected %q but got %q for Segment 'BackupVaultName'", id.BackupVaultName, "backupVaultName") + } + + if id.BackupName != "backupName" { + t.Fatalf("Expected %q but got %q for Segment 'BackupName'", id.BackupName, "backupName") + } +} + +func TestFormatBackupID(t *testing.T) { + actual := NewBackupID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName", "backupName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName/backups/backupName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseBackupID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *BackupId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName/backups", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName/backups/backupName", + Expected: &BackupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + BackupVaultName: "backupVaultName", + BackupName: "backupName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName/backups/backupName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseBackupID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.BackupVaultName != v.Expected.BackupVaultName { + t.Fatalf("Expected %q but got %q for BackupVaultName", v.Expected.BackupVaultName, actual.BackupVaultName) + } + + if actual.BackupName != v.Expected.BackupName { + t.Fatalf("Expected %q but got %q for BackupName", v.Expected.BackupName, actual.BackupName) + } + + } +} + +func TestParseBackupIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *BackupId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { 
+ // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPvAuLtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPvAuLtS/bAcKuPvAuLtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName/backups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPvAuLtS/bAcKuPvAuLtNaMe/bAcKuPs", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName/backups/backupName", + Expected: &BackupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + BackupVaultName: "backupVaultName", + BackupName: "backupName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName/backups/backupName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPvAuLtS/bAcKuPvAuLtNaMe/bAcKuPs/bAcKuPnAmE", + Expected: &BackupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + BackupVaultName: "bAcKuPvAuLtNaMe", + BackupName: "bAcKuPnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPvAuLtS/bAcKuPvAuLtNaMe/bAcKuPs/bAcKuPnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseBackupIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", 
v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.BackupVaultName != v.Expected.BackupVaultName { + t.Fatalf("Expected %q but got %q for BackupVaultName", v.Expected.BackupVaultName, actual.BackupVaultName) + } + + if actual.BackupName != v.Expected.BackupName { + t.Fatalf("Expected %q but got %q for BackupName", v.Expected.BackupName, actual.BackupName) + } + + } +} + +func TestSegmentsForBackupId(t *testing.T) { + segments := BackupId{}.Segments() + if len(segments) == 0 { + t.Fatalf("BackupId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/backups/id_backupvault.go b/resource-manager/netapp/2025-06-01/backups/id_backupvault.go new file mode 100644 index 00000000000..779adf353ca --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/id_backupvault.go @@ -0,0 +1,139 @@ +package backups + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&BackupVaultId{}) +} + +var _ resourceids.ResourceId = &BackupVaultId{} + +// BackupVaultId is a struct representing the Resource ID for a Backup Vault +type BackupVaultId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + BackupVaultName string +} + +// NewBackupVaultID returns a new BackupVaultId struct +func NewBackupVaultID(subscriptionId string, resourceGroupName string, netAppAccountName string, backupVaultName string) BackupVaultId { + return BackupVaultId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + BackupVaultName: backupVaultName, + } +} + +// ParseBackupVaultID parses 'input' into a BackupVaultId +func ParseBackupVaultID(input string) (*BackupVaultId, error) { + parser := resourceids.NewParserFromResourceIdType(&BackupVaultId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BackupVaultId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseBackupVaultIDInsensitively parses 'input' case-insensitively into a BackupVaultId +// note: this method should only be used for API response data and not user input +func ParseBackupVaultIDInsensitively(input string) (*BackupVaultId, error) { + parser := resourceids.NewParserFromResourceIdType(&BackupVaultId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BackupVaultId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *BackupVaultId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, 
ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.BackupVaultName, ok = input.Parsed["backupVaultName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "backupVaultName", input) + } + + return nil +} + +// ValidateBackupVaultID checks that 'input' can be parsed as a Backup Vault ID +func ValidateBackupVaultID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseBackupVaultID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Backup Vault ID +func (id BackupVaultId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/backupVaults/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.BackupVaultName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Backup Vault ID +func (id BackupVaultId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + 
resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticBackupVaults", "backupVaults", "backupVaults"), + resourceids.UserSpecifiedSegment("backupVaultName", "backupVaultName"), + } +} + +// String returns a human-readable description of this Backup Vault ID +func (id BackupVaultId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Backup Vault Name: %q", id.BackupVaultName), + } + return fmt.Sprintf("Backup Vault (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/backups/id_backupvault_test.go b/resource-manager/netapp/2025-06-01/backups/id_backupvault_test.go new file mode 100644 index 00000000000..1601bd44bc7 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/id_backupvault_test.go @@ -0,0 +1,327 @@ +package backups + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &BackupVaultId{} + +func TestNewBackupVaultID(t *testing.T) { + id := NewBackupVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.BackupVaultName != "backupVaultName" { + t.Fatalf("Expected %q but got %q for Segment 'BackupVaultName'", id.BackupVaultName, "backupVaultName") + } +} + +func TestFormatBackupVaultID(t *testing.T) { + actual := NewBackupVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseBackupVaultID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *BackupVaultId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName", + Expected: &BackupVaultId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + BackupVaultName: "backupVaultName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseBackupVaultID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { 
+ t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.BackupVaultName != v.Expected.BackupVaultName { + t.Fatalf("Expected %q but got %q for BackupVaultName", v.Expected.BackupVaultName, actual.BackupVaultName) + } + + } +} + +func TestParseBackupVaultIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *BackupVaultId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + 
Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPvAuLtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName", + Expected: &BackupVaultId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + BackupVaultName: "backupVaultName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPvAuLtS/bAcKuPvAuLtNaMe", + Expected: &BackupVaultId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + BackupVaultName: "bAcKuPvAuLtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPvAuLtS/bAcKuPvAuLtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseBackupVaultIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if 
actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.BackupVaultName != v.Expected.BackupVaultName { + t.Fatalf("Expected %q but got %q for BackupVaultName", v.Expected.BackupVaultName, actual.BackupVaultName) + } + + } +} + +func TestSegmentsForBackupVaultId(t *testing.T) { + segments := BackupVaultId{}.Segments() + if len(segments) == 0 { + t.Fatalf("BackupVaultId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/backups/id_netappaccount.go b/resource-manager/netapp/2025-06-01/backups/id_netappaccount.go new file mode 100644 index 00000000000..516c7ff3911 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/id_netappaccount.go @@ -0,0 +1,130 @@ +package backups + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&NetAppAccountId{}) +} + +var _ resourceids.ResourceId = &NetAppAccountId{} + +// NetAppAccountId is a struct representing the Resource ID for a Net App Account +type NetAppAccountId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string +} + +// NewNetAppAccountID returns a new NetAppAccountId struct +func NewNetAppAccountID(subscriptionId string, resourceGroupName string, netAppAccountName string) NetAppAccountId { + return NetAppAccountId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + } +} + +// ParseNetAppAccountID parses 'input' into a NetAppAccountId +func ParseNetAppAccountID(input string) (*NetAppAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseNetAppAccountIDInsensitively parses 'input' case-insensitively into a NetAppAccountId +// note: this method should only be used for API response data and not user input +func ParseNetAppAccountIDInsensitively(input string) (*NetAppAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *NetAppAccountId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; 
!ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + return nil +} + +// ValidateNetAppAccountID checks that 'input' can be parsed as a Net App Account ID +func ValidateNetAppAccountID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseNetAppAccountID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Net App Account ID +func (id NetAppAccountId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Net App Account ID +func (id NetAppAccountId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + } +} + +// String returns a human-readable description of this Net App Account ID +func (id NetAppAccountId) String() string { + components 
:= []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + } + return fmt.Sprintf("Net App Account (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/backups/id_netappaccount_test.go b/resource-manager/netapp/2025-06-01/backups/id_netappaccount_test.go new file mode 100644 index 00000000000..18f86cc0302 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/id_netappaccount_test.go @@ -0,0 +1,282 @@ +package backups + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &NetAppAccountId{} + +func TestNewNetAppAccountID(t *testing.T) { + id := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } +} + +func TestFormatNetAppAccountID(t *testing.T) { + actual := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName" + if actual != expected { + 
t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseNetAppAccountID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountID(v.Input) + if err != nil { + if v.Error 
{ + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestParseNetAppAccountIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Expected: &NetAppAccountId{ + SubscriptionId: 
"12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestSegmentsForNetAppAccountId(t *testing.T) { + segments := NetAppAccountId{}.Segments() + if len(segments) == 0 { + t.Fatalf("NetAppAccountId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/backups/id_volume.go b/resource-manager/netapp/2025-06-01/backups/id_volume.go new file mode 100644 index 00000000000..68f2aee4179 --- /dev/null +++ 
b/resource-manager/netapp/2025-06-01/backups/id_volume.go @@ -0,0 +1,148 @@ +package backups + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := 
VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + 
resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/backups/id_volume_test.go b/resource-manager/netapp/2025-06-01/backups/id_volume_test.go new file mode 100644 index 00000000000..8f1a2ec3f88 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/id_volume_test.go @@ -0,0 +1,372 @@ +package backups + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/backups/method_create.go b/resource-manager/netapp/2025-06-01/backups/method_create.go new file mode 100644 index 00000000000..547e1883e1f --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/method_create.go @@ -0,0 +1,75 @@ +package backups + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CreateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *Backup +} + +// Create ... +func (c BackupsClient) Create(ctx context.Context, id BackupId, input Backup) (result CreateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateThenPoll performs Create then polls until it's completed +func (c BackupsClient) CreateThenPoll(ctx context.Context, id BackupId, input Backup) error { + result, err := c.Create(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Create: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Create: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/backups/method_delete.go b/resource-manager/netapp/2025-06-01/backups/method_delete.go new file mode 100644 index 00000000000..15a40b88c3c --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/method_delete.go @@ -0,0 +1,70 @@ +package backups + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c BackupsClient) Delete(ctx context.Context, id BackupId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c BackupsClient) DeleteThenPoll(ctx context.Context, id BackupId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/backups/method_get.go b/resource-manager/netapp/2025-06-01/backups/method_get.go new file mode 100644 index 00000000000..fe88bb84d0a --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/method_get.go @@ -0,0 +1,53 @@ +package backups + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *Backup +} + +// Get ... +func (c BackupsClient) Get(ctx context.Context, id BackupId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model Backup + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/backups/method_getlateststatus.go b/resource-manager/netapp/2025-06-01/backups/method_getlateststatus.go new file mode 100644 index 00000000000..1668fdfcf7e --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/method_getlateststatus.go @@ -0,0 +1,54 @@ +package backups + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetLatestStatusOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *BackupStatus +} + +// GetLatestStatus ... 
+func (c BackupsClient) GetLatestStatus(ctx context.Context, id VolumeId) (result GetLatestStatusOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/latestBackupStatus/current", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model BackupStatus + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/backups/method_listbyvault.go b/resource-manager/netapp/2025-06-01/backups/method_listbyvault.go new file mode 100644 index 00000000000..3416244ff49 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/method_listbyvault.go @@ -0,0 +1,134 @@ +package backups + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListByVaultOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]Backup +} + +type ListByVaultCompleteResult struct { + LatestHttpResponse *http.Response + Items []Backup +} + +type ListByVaultOperationOptions struct { + Filter *string +} + +func DefaultListByVaultOperationOptions() ListByVaultOperationOptions { + return ListByVaultOperationOptions{} +} + +func (o ListByVaultOperationOptions) ToHeaders() *client.Headers { + out := client.Headers{} + + return &out +} + +func (o ListByVaultOperationOptions) ToOData() *odata.Query { + out := odata.Query{} + + return &out +} + +func (o ListByVaultOperationOptions) ToQuery() *client.QueryParams { + out := client.QueryParams{} + if o.Filter != nil { + out.Append("$filter", fmt.Sprintf("%v", *o.Filter)) + } + return &out +} + +type ListByVaultCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByVaultCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByVault ... 
+func (c BackupsClient) ListByVault(ctx context.Context, id BackupVaultId, options ListByVaultOperationOptions) (result ListByVaultOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + OptionsObject: options, + Pager: &ListByVaultCustomPager{}, + Path: fmt.Sprintf("%s/backups", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]Backup `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByVaultComplete retrieves all the results into a single object +func (c BackupsClient) ListByVaultComplete(ctx context.Context, id BackupVaultId, options ListByVaultOperationOptions) (ListByVaultCompleteResult, error) { + return c.ListByVaultCompleteMatchingPredicate(ctx, id, options, BackupOperationPredicate{}) +} + +// ListByVaultCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c BackupsClient) ListByVaultCompleteMatchingPredicate(ctx context.Context, id BackupVaultId, options ListByVaultOperationOptions, predicate BackupOperationPredicate) (result ListByVaultCompleteResult, err error) { + items := make([]Backup, 0) + + resp, err := c.ListByVault(ctx, id, options) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByVaultCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git 
a/resource-manager/netapp/2025-06-01/backups/method_underaccountmigratebackups.go b/resource-manager/netapp/2025-06-01/backups/method_underaccountmigratebackups.go new file mode 100644 index 00000000000..d8792fed2b6 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/method_underaccountmigratebackups.go @@ -0,0 +1,73 @@ +package backups + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UnderAccountMigrateBackupsOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// UnderAccountMigrateBackups ... +func (c BackupsClient) UnderAccountMigrateBackups(ctx context.Context, id NetAppAccountId, input BackupsMigrationRequest) (result UnderAccountMigrateBackupsOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/migrateBackups", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UnderAccountMigrateBackupsThenPoll performs UnderAccountMigrateBackups then polls until it's completed +func (c BackupsClient) UnderAccountMigrateBackupsThenPoll(ctx context.Context, 
id NetAppAccountId, input BackupsMigrationRequest) error { + result, err := c.UnderAccountMigrateBackups(ctx, id, input) + if err != nil { + return fmt.Errorf("performing UnderAccountMigrateBackups: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after UnderAccountMigrateBackups: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/backups/method_underbackupvaultrestorefiles.go b/resource-manager/netapp/2025-06-01/backups/method_underbackupvaultrestorefiles.go new file mode 100644 index 00000000000..fae1ff9c3ac --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/method_underbackupvaultrestorefiles.go @@ -0,0 +1,73 @@ +package backups + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UnderBackupVaultRestoreFilesOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// UnderBackupVaultRestoreFiles ... 
+func (c BackupsClient) UnderBackupVaultRestoreFiles(ctx context.Context, id BackupId, input BackupRestoreFiles) (result UnderBackupVaultRestoreFilesOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/restoreFiles", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UnderBackupVaultRestoreFilesThenPoll performs UnderBackupVaultRestoreFiles then polls until it's completed +func (c BackupsClient) UnderBackupVaultRestoreFilesThenPoll(ctx context.Context, id BackupId, input BackupRestoreFiles) error { + result, err := c.UnderBackupVaultRestoreFiles(ctx, id, input) + if err != nil { + return fmt.Errorf("performing UnderBackupVaultRestoreFiles: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after UnderBackupVaultRestoreFiles: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/backups/method_undervolumemigratebackups.go b/resource-manager/netapp/2025-06-01/backups/method_undervolumemigratebackups.go new file mode 100644 index 00000000000..1bbdca6b084 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/method_undervolumemigratebackups.go @@ -0,0 +1,73 @@ +package backups + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UnderVolumeMigrateBackupsOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// UnderVolumeMigrateBackups ... +func (c BackupsClient) UnderVolumeMigrateBackups(ctx context.Context, id VolumeId, input BackupsMigrationRequest) (result UnderVolumeMigrateBackupsOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/migrateBackups", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UnderVolumeMigrateBackupsThenPoll performs UnderVolumeMigrateBackups then polls until it's completed +func (c BackupsClient) UnderVolumeMigrateBackupsThenPoll(ctx context.Context, id VolumeId, input BackupsMigrationRequest) error { + result, err := c.UnderVolumeMigrateBackups(ctx, id, input) + if err != nil { + return fmt.Errorf("performing UnderVolumeMigrateBackups: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after UnderVolumeMigrateBackups: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/backups/method_update.go b/resource-manager/netapp/2025-06-01/backups/method_update.go new file mode 100644 index 00000000000..0a8669d7caa --- /dev/null +++ 
b/resource-manager/netapp/2025-06-01/backups/method_update.go @@ -0,0 +1,75 @@ +package backups + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *Backup +} + +// Update ... +func (c BackupsClient) Update(ctx context.Context, id BackupId, input BackupPatch) (result UpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UpdateThenPoll performs Update then polls until it's completed +func (c BackupsClient) UpdateThenPoll(ctx context.Context, id BackupId, input BackupPatch) error { + result, err := c.Update(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Update: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Update: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/backups/model_backup.go 
b/resource-manager/netapp/2025-06-01/backups/model_backup.go new file mode 100644 index 00000000000..ef5ef0a7bc6 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/model_backup.go @@ -0,0 +1,16 @@ +package backups + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type Backup struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties BackupProperties `json:"properties"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/backups/model_backuppatch.go b/resource-manager/netapp/2025-06-01/backups/model_backuppatch.go new file mode 100644 index 00000000000..a87251f4b0c --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/model_backuppatch.go @@ -0,0 +1,8 @@ +package backups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupPatch struct { + Properties *BackupPatchProperties `json:"properties,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/backups/model_backuppatchproperties.go b/resource-manager/netapp/2025-06-01/backups/model_backuppatchproperties.go new file mode 100644 index 00000000000..b374bda3143 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/model_backuppatchproperties.go @@ -0,0 +1,8 @@ +package backups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BackupPatchProperties struct { + Label *string `json:"label,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/backups/model_backupproperties.go b/resource-manager/netapp/2025-06-01/backups/model_backupproperties.go new file mode 100644 index 00000000000..c15a08eb1f2 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/model_backupproperties.go @@ -0,0 +1,63 @@ +package backups + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupProperties struct { + BackupId *string `json:"backupId,omitempty"` + BackupPolicyResourceId *string `json:"backupPolicyResourceId,omitempty"` + BackupType *BackupType `json:"backupType,omitempty"` + CompletionDate *string `json:"completionDate,omitempty"` + CreationDate *string `json:"creationDate,omitempty"` + FailureReason *string `json:"failureReason,omitempty"` + IsLargeVolume *bool `json:"isLargeVolume,omitempty"` + Label *string `json:"label,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + Size *int64 `json:"size,omitempty"` + SnapshotCreationDate *string `json:"snapshotCreationDate,omitempty"` + SnapshotName *string `json:"snapshotName,omitempty"` + UseExistingSnapshot *bool `json:"useExistingSnapshot,omitempty"` + VolumeResourceId string `json:"volumeResourceId"` +} + +func (o *BackupProperties) GetCompletionDateAsTime() (*time.Time, error) { + if o.CompletionDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CompletionDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupProperties) SetCompletionDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CompletionDate = &formatted +} + +func (o *BackupProperties) GetCreationDateAsTime() (*time.Time, error) { + if o.CreationDate == nil { + return nil, nil + } + return 
dates.ParseAsFormat(o.CreationDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupProperties) SetCreationDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CreationDate = &formatted +} + +func (o *BackupProperties) GetSnapshotCreationDateAsTime() (*time.Time, error) { + if o.SnapshotCreationDate == nil { + return nil, nil + } + return dates.ParseAsFormat(o.SnapshotCreationDate, "2006-01-02T15:04:05Z07:00") +} + +func (o *BackupProperties) SetSnapshotCreationDateAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.SnapshotCreationDate = &formatted +} diff --git a/resource-manager/netapp/2025-06-01/backups/model_backuprestorefiles.go b/resource-manager/netapp/2025-06-01/backups/model_backuprestorefiles.go new file mode 100644 index 00000000000..e26ce886407 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/model_backuprestorefiles.go @@ -0,0 +1,10 @@ +package backups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupRestoreFiles struct { + DestinationVolumeId string `json:"destinationVolumeId"` + FileList []string `json:"fileList"` + RestoreFilePath *string `json:"restoreFilePath,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/backups/model_backupsmigrationrequest.go b/resource-manager/netapp/2025-06-01/backups/model_backupsmigrationrequest.go new file mode 100644 index 00000000000..f0fb612b507 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/model_backupsmigrationrequest.go @@ -0,0 +1,8 @@ +package backups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BackupsMigrationRequest struct { + BackupVaultId string `json:"backupVaultId"` +} diff --git a/resource-manager/netapp/2025-06-01/backups/model_backupstatus.go b/resource-manager/netapp/2025-06-01/backups/model_backupstatus.go new file mode 100644 index 00000000000..e873cd9463d --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/model_backupstatus.go @@ -0,0 +1,16 @@ +package backups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupStatus struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Healthy *bool `json:"healthy,omitempty"` + LastTransferSize *int64 `json:"lastTransferSize,omitempty"` + LastTransferType *string `json:"lastTransferType,omitempty"` + MirrorState *MirrorState `json:"mirrorState,omitempty"` + RelationshipStatus *RelationshipStatus `json:"relationshipStatus,omitempty"` + TotalTransferBytes *int64 `json:"totalTransferBytes,omitempty"` + TransferProgressBytes *int64 `json:"transferProgressBytes,omitempty"` + UnhealthyReason *string `json:"unhealthyReason,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/backups/predicates.go b/resource-manager/netapp/2025-06-01/backups/predicates.go new file mode 100644 index 00000000000..f5033a2bb91 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/predicates.go @@ -0,0 +1,27 @@ +package backups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type BackupOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p BackupOperationPredicate) Matches(input Backup) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/netapp/2025-06-01/backups/version.go b/resource-manager/netapp/2025-06-01/backups/version.go new file mode 100644 index 00000000000..674b8df8727 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backups/version.go @@ -0,0 +1,10 @@ +package backups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/backups/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/backupvaults/README.md b/resource-manager/netapp/2025-06-01/backupvaults/README.md new file mode 100644 index 00000000000..87921d16312 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backupvaults/README.md @@ -0,0 +1,99 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/backupvaults` Documentation + +The `backupvaults` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/backupvaults" +``` + + +### Client Initialization + +```go +client := backupvaults.NewBackupVaultsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `BackupVaultsClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := backupvaults.NewBackupVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName") + +payload := backupvaults.BackupVault{ + // ... +} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `BackupVaultsClient.Delete` + +```go +ctx := context.TODO() +id := backupvaults.NewBackupVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `BackupVaultsClient.Get` + +```go +ctx := context.TODO() +id := backupvaults.NewBackupVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `BackupVaultsClient.ListByNetAppAccount` + +```go +ctx := context.TODO() +id := backupvaults.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +// alternatively `client.ListByNetAppAccount(ctx, id)` can be used to do batched pagination +items, err := client.ListByNetAppAccountComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `BackupVaultsClient.Update` + +```go +ctx := context.TODO() +id := 
backupvaults.NewBackupVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName") + +payload := backupvaults.BackupVaultPatch{ + // ... +} + + +if err := client.UpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/backupvaults/client.go b/resource-manager/netapp/2025-06-01/backupvaults/client.go new file mode 100644 index 00000000000..57d96b431b5 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backupvaults/client.go @@ -0,0 +1,26 @@ +package backupvaults + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BackupVaultsClient struct { + Client *resourcemanager.Client +} + +func NewBackupVaultsClientWithBaseURI(sdkApi sdkEnv.Api) (*BackupVaultsClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "backupvaults", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating BackupVaultsClient: %+v", err) + } + + return &BackupVaultsClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/backupvaults/id_backupvault.go b/resource-manager/netapp/2025-06-01/backupvaults/id_backupvault.go new file mode 100644 index 00000000000..84c03165062 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backupvaults/id_backupvault.go @@ -0,0 +1,139 @@ +package backupvaults + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&BackupVaultId{}) +} + +var _ resourceids.ResourceId = &BackupVaultId{} + +// BackupVaultId is a struct representing the Resource ID for a Backup Vault +type BackupVaultId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + BackupVaultName string +} + +// NewBackupVaultID returns a new BackupVaultId struct +func NewBackupVaultID(subscriptionId string, resourceGroupName string, netAppAccountName string, backupVaultName string) BackupVaultId { + return BackupVaultId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + BackupVaultName: backupVaultName, + } +} + +// ParseBackupVaultID parses 'input' into a BackupVaultId +func ParseBackupVaultID(input string) (*BackupVaultId, error) { + parser := resourceids.NewParserFromResourceIdType(&BackupVaultId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BackupVaultId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseBackupVaultIDInsensitively parses 'input' case-insensitively into a BackupVaultId +// note: this method should only be used for API response data and not user input +func ParseBackupVaultIDInsensitively(input string) (*BackupVaultId, error) { + parser := resourceids.NewParserFromResourceIdType(&BackupVaultId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := BackupVaultId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *BackupVaultId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, 
ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.BackupVaultName, ok = input.Parsed["backupVaultName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "backupVaultName", input) + } + + return nil +} + +// ValidateBackupVaultID checks that 'input' can be parsed as a Backup Vault ID +func ValidateBackupVaultID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseBackupVaultID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Backup Vault ID +func (id BackupVaultId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/backupVaults/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.BackupVaultName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Backup Vault ID +func (id BackupVaultId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + 
resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticBackupVaults", "backupVaults", "backupVaults"), + resourceids.UserSpecifiedSegment("backupVaultName", "backupVaultName"), + } +} + +// String returns a human-readable description of this Backup Vault ID +func (id BackupVaultId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Backup Vault Name: %q", id.BackupVaultName), + } + return fmt.Sprintf("Backup Vault (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/backupvaults/id_backupvault_test.go b/resource-manager/netapp/2025-06-01/backupvaults/id_backupvault_test.go new file mode 100644 index 00000000000..2a77aec3df2 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backupvaults/id_backupvault_test.go @@ -0,0 +1,327 @@ +package backupvaults + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &BackupVaultId{} + +func TestNewBackupVaultID(t *testing.T) { + id := NewBackupVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.BackupVaultName != "backupVaultName" { + t.Fatalf("Expected %q but got %q for Segment 'BackupVaultName'", id.BackupVaultName, "backupVaultName") + } +} + +func TestFormatBackupVaultID(t *testing.T) { + actual := NewBackupVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "backupVaultName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseBackupVaultID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *BackupVaultId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName", + Expected: &BackupVaultId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + BackupVaultName: "backupVaultName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseBackupVaultID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { 
+ t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.BackupVaultName != v.Expected.BackupVaultName { + t.Fatalf("Expected %q but got %q for BackupVaultName", v.Expected.BackupVaultName, actual.BackupVaultName) + } + + } +} + +func TestParseBackupVaultIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *BackupVaultId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + 
Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPvAuLtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName", + Expected: &BackupVaultId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + BackupVaultName: "backupVaultName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/backupVaults/backupVaultName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPvAuLtS/bAcKuPvAuLtNaMe", + Expected: &BackupVaultId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + BackupVaultName: "bAcKuPvAuLtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/bAcKuPvAuLtS/bAcKuPvAuLtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseBackupVaultIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if 
actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.BackupVaultName != v.Expected.BackupVaultName { + t.Fatalf("Expected %q but got %q for BackupVaultName", v.Expected.BackupVaultName, actual.BackupVaultName) + } + + } +} + +func TestSegmentsForBackupVaultId(t *testing.T) { + segments := BackupVaultId{}.Segments() + if len(segments) == 0 { + t.Fatalf("BackupVaultId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/backupvaults/id_netappaccount.go b/resource-manager/netapp/2025-06-01/backupvaults/id_netappaccount.go new file mode 100644 index 00000000000..203dcf207ec --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backupvaults/id_netappaccount.go @@ -0,0 +1,130 @@ +package backupvaults + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&NetAppAccountId{}) +} + +var _ resourceids.ResourceId = &NetAppAccountId{} + +// NetAppAccountId is a struct representing the Resource ID for a Net App Account +type NetAppAccountId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string +} + +// NewNetAppAccountID returns a new NetAppAccountId struct +func NewNetAppAccountID(subscriptionId string, resourceGroupName string, netAppAccountName string) NetAppAccountId { + return NetAppAccountId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + } +} + +// ParseNetAppAccountID parses 'input' into a NetAppAccountId +func ParseNetAppAccountID(input string) (*NetAppAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseNetAppAccountIDInsensitively parses 'input' case-insensitively into a NetAppAccountId +// note: this method should only be used for API response data and not user input +func ParseNetAppAccountIDInsensitively(input string) (*NetAppAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *NetAppAccountId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; 
!ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + return nil +} + +// ValidateNetAppAccountID checks that 'input' can be parsed as a Net App Account ID +func ValidateNetAppAccountID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseNetAppAccountID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Net App Account ID +func (id NetAppAccountId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Net App Account ID +func (id NetAppAccountId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + } +} + +// String returns a human-readable description of this Net App Account ID +func (id NetAppAccountId) String() string { + components 
:= []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + } + return fmt.Sprintf("Net App Account (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/backupvaults/id_netappaccount_test.go b/resource-manager/netapp/2025-06-01/backupvaults/id_netappaccount_test.go new file mode 100644 index 00000000000..151b0b66fb7 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backupvaults/id_netappaccount_test.go @@ -0,0 +1,282 @@ +package backupvaults + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &NetAppAccountId{} + +func TestNewNetAppAccountID(t *testing.T) { + id := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } +} + +func TestFormatNetAppAccountID(t *testing.T) { + actual := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName" + if actual 
!= expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseNetAppAccountID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountID(v.Input) + if err != nil 
{ + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestParseNetAppAccountIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Expected: &NetAppAccountId{ + SubscriptionId: 
"12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestSegmentsForNetAppAccountId(t *testing.T) { + segments := NetAppAccountId{}.Segments() + if len(segments) == 0 { + t.Fatalf("NetAppAccountId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/backupvaults/method_createorupdate.go b/resource-manager/netapp/2025-06-01/backupvaults/method_createorupdate.go new file mode 100644 index 00000000000..0f75206e2d7 --- /dev/null +++ 
b/resource-manager/netapp/2025-06-01/backupvaults/method_createorupdate.go @@ -0,0 +1,75 @@ +package backupvaults + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *BackupVault +} + +// CreateOrUpdate ... +func (c BackupVaultsClient) CreateOrUpdate(ctx context.Context, id BackupVaultId, input BackupVault) (result CreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c BackupVaultsClient) CreateOrUpdateThenPoll(ctx context.Context, id BackupVaultId, input BackupVault) error { + result, err := c.CreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing CreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) + } + + return nil +} 
diff --git a/resource-manager/netapp/2025-06-01/backupvaults/method_delete.go b/resource-manager/netapp/2025-06-01/backupvaults/method_delete.go new file mode 100644 index 00000000000..32a1387f1ba --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backupvaults/method_delete.go @@ -0,0 +1,70 @@ +package backupvaults + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c BackupVaultsClient) Delete(ctx context.Context, id BackupVaultId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c BackupVaultsClient) DeleteThenPoll(ctx context.Context, id BackupVaultId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + 
return nil +} diff --git a/resource-manager/netapp/2025-06-01/backupvaults/method_get.go b/resource-manager/netapp/2025-06-01/backupvaults/method_get.go new file mode 100644 index 00000000000..6cd544f7ca9 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backupvaults/method_get.go @@ -0,0 +1,53 @@ +package backupvaults + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *BackupVault +} + +// Get ... +func (c BackupVaultsClient) Get(ctx context.Context, id BackupVaultId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model BackupVault + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/backupvaults/method_listbynetappaccount.go b/resource-manager/netapp/2025-06-01/backupvaults/method_listbynetappaccount.go new file mode 100644 index 00000000000..a0f46fc1023 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backupvaults/method_listbynetappaccount.go @@ -0,0 +1,105 @@ +package backupvaults + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListByNetAppAccountOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]BackupVault +} + +type ListByNetAppAccountCompleteResult struct { + LatestHttpResponse *http.Response + Items []BackupVault +} + +type ListByNetAppAccountCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByNetAppAccountCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByNetAppAccount ... +func (c BackupVaultsClient) ListByNetAppAccount(ctx context.Context, id NetAppAccountId) (result ListByNetAppAccountOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByNetAppAccountCustomPager{}, + Path: fmt.Sprintf("%s/backupVaults", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]BackupVault `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByNetAppAccountComplete retrieves all the results into a single object +func (c BackupVaultsClient) ListByNetAppAccountComplete(ctx context.Context, id NetAppAccountId) (ListByNetAppAccountCompleteResult, error) { + return c.ListByNetAppAccountCompleteMatchingPredicate(ctx, id, BackupVaultOperationPredicate{}) +} + +// ListByNetAppAccountCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c BackupVaultsClient) ListByNetAppAccountCompleteMatchingPredicate(ctx 
context.Context, id NetAppAccountId, predicate BackupVaultOperationPredicate) (result ListByNetAppAccountCompleteResult, err error) { + items := make([]BackupVault, 0) + + resp, err := c.ListByNetAppAccount(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByNetAppAccountCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/netapp/2025-06-01/backupvaults/method_update.go b/resource-manager/netapp/2025-06-01/backupvaults/method_update.go new file mode 100644 index 00000000000..8ea7dce3123 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backupvaults/method_update.go @@ -0,0 +1,75 @@ +package backupvaults + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *BackupVault +} + +// Update ... 
+func (c BackupVaultsClient) Update(ctx context.Context, id BackupVaultId, input BackupVaultPatch) (result UpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UpdateThenPoll performs Update then polls until it's completed +func (c BackupVaultsClient) UpdateThenPoll(ctx context.Context, id BackupVaultId, input BackupVaultPatch) error { + result, err := c.Update(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Update: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Update: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/backupvaults/model_backupvault.go b/resource-manager/netapp/2025-06-01/backupvaults/model_backupvault.go new file mode 100644 index 00000000000..0e8761f2293 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backupvaults/model_backupvault.go @@ -0,0 +1,18 @@ +package backupvaults + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// BackupVault is the ARM resource envelope for a NetApp Backup Vault.
type BackupVault struct {
	// Id is the fully-qualified ARM resource ID — populated by the service.
	Id *string `json:"id,omitempty"`
	// Location is the Azure region; non-pointer because it is always serialized.
	Location string `json:"location"`
	// Name is the resource name — populated by the service.
	Name *string `json:"name,omitempty"`
	// Properties holds the vault's service-managed properties.
	Properties *BackupVaultProperties `json:"properties,omitempty"`
	// SystemData carries ARM created/modified audit metadata.
	SystemData *systemdata.SystemData `json:"systemData,omitempty"`
	// Tags are the user-assigned resource tags.
	Tags *map[string]string `json:"tags,omitempty"`
	// Type is the ARM resource type — populated by the service.
	Type *string `json:"type,omitempty"`
}

// --- model_backupvaultpatch.go ---
package backupvaults

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// BackupVaultPatch is the PATCH payload accepted by Update: only tags can be changed.
type BackupVaultPatch struct {
	Tags *map[string]string `json:"tags,omitempty"`
}

// --- model_backupvaultproperties.go ---
package backupvaults

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// BackupVaultProperties holds the vault's service-managed properties.
type BackupVaultProperties struct {
	// ProvisioningState is the read-only ARM provisioning status
	// (presumably values like "Succeeded"/"Failed" — not enumerated here; verify against the API spec).
	ProvisioningState *string `json:"provisioningState,omitempty"`
}

// --- predicates.go ---
package backupvaults

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
See NOTICE.txt in the project root for license information. + +type BackupVaultOperationPredicate struct { + Id *string + Location *string + Name *string + Type *string +} + +func (p BackupVaultOperationPredicate) Matches(input BackupVault) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Location != nil && *p.Location != input.Location { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/netapp/2025-06-01/backupvaults/version.go b/resource-manager/netapp/2025-06-01/backupvaults/version.go new file mode 100644 index 00000000000..7bec879d34f --- /dev/null +++ b/resource-manager/netapp/2025-06-01/backupvaults/version.go @@ -0,0 +1,10 @@ +package backupvaults + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/backupvaults/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/capacitypools/README.md b/resource-manager/netapp/2025-06-01/capacitypools/README.md new file mode 100644 index 00000000000..08f86a5acc6 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/capacitypools/README.md @@ -0,0 +1,99 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/capacitypools` Documentation + +The `capacitypools` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/capacitypools" +``` + + +### Client Initialization + +```go +client := capacitypools.NewCapacityPoolsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `CapacityPoolsClient.PoolsCreateOrUpdate` + +```go +ctx := context.TODO() +id := capacitypools.NewCapacityPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName") + +payload := capacitypools.CapacityPool{ + // ... +} + + +if err := client.PoolsCreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `CapacityPoolsClient.PoolsDelete` + +```go +ctx := context.TODO() +id := capacitypools.NewCapacityPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName") + +if err := client.PoolsDeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `CapacityPoolsClient.PoolsGet` + +```go +ctx := context.TODO() +id := capacitypools.NewCapacityPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName") + +read, err := client.PoolsGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `CapacityPoolsClient.PoolsList` + +```go +ctx := context.TODO() +id := capacitypools.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +// alternatively `client.PoolsList(ctx, id)` can be used to do batched pagination +items, err := client.PoolsListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `CapacityPoolsClient.PoolsUpdate` + +```go +ctx := context.TODO() +id := 
capacitypools.NewCapacityPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName") + +payload := capacitypools.CapacityPoolPatch{ + // ... +} + + +if err := client.PoolsUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/capacitypools/client.go b/resource-manager/netapp/2025-06-01/capacitypools/client.go new file mode 100644 index 00000000000..d7a8be4e076 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/capacitypools/client.go @@ -0,0 +1,26 @@ +package capacitypools + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CapacityPoolsClient struct { + Client *resourcemanager.Client +} + +func NewCapacityPoolsClientWithBaseURI(sdkApi sdkEnv.Api) (*CapacityPoolsClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "capacitypools", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating CapacityPoolsClient: %+v", err) + } + + return &CapacityPoolsClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/capacitypools/constants.go b/resource-manager/netapp/2025-06-01/capacitypools/constants.go new file mode 100644 index 00000000000..472afe20d70 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/capacitypools/constants.go @@ -0,0 +1,142 @@ +package capacitypools + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// EncryptionType is the pool's encryption mode.
type EncryptionType string

const (
	EncryptionTypeDouble EncryptionType = "Double"
	EncryptionTypeSingle EncryptionType = "Single"
)

// PossibleValuesForEncryptionType lists every known EncryptionType value.
func PossibleValuesForEncryptionType() []string {
	return []string{
		string(EncryptionTypeDouble),
		string(EncryptionTypeSingle),
	}
}

// UnmarshalJSON decodes a JSON string into an EncryptionType, re-casing known values.
func (s *EncryptionType) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseEncryptionType(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseEncryptionType matches input case-insensitively against the known
// values; unknown input is passed through so newer API values still decode.
func parseEncryptionType(input string) (*EncryptionType, error) {
	known := map[string]EncryptionType{
		"double": EncryptionTypeDouble,
		"single": EncryptionTypeSingle,
	}
	if match, ok := known[strings.ToLower(input)]; ok {
		return &match, nil
	}

	passthrough := EncryptionType(input)
	return &passthrough, nil
}

// QosType is the pool's quality-of-service mode.
type QosType string

const (
	QosTypeAuto   QosType = "Auto"
	QosTypeManual QosType = "Manual"
)

// PossibleValuesForQosType lists every known QosType value.
func PossibleValuesForQosType() []string {
	return []string{
		string(QosTypeAuto),
		string(QosTypeManual),
	}
}

// UnmarshalJSON decodes a JSON string into a QosType, re-casing known values.
func (s *QosType) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseQosType(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseQosType matches input case-insensitively against the known values;
// unknown input is passed through so newer API values still decode.
func parseQosType(input string) (*QosType, error) {
	known := map[string]QosType{
		"auto":   QosTypeAuto,
		"manual": QosTypeManual,
	}
	if match, ok := known[strings.ToLower(input)]; ok {
		return &match, nil
	}

	passthrough := QosType(input)
	return &passthrough, nil
}

// ServiceLevel is the pool's performance service level.
type ServiceLevel string

const (
	ServiceLevelFlexible    ServiceLevel = "Flexible"
	ServiceLevelPremium     ServiceLevel = "Premium"
	ServiceLevelStandard    ServiceLevel = "Standard"
	ServiceLevelStandardZRS ServiceLevel = "StandardZRS"
	ServiceLevelUltra       ServiceLevel = "Ultra"
)

// PossibleValuesForServiceLevel lists every known ServiceLevel value.
func PossibleValuesForServiceLevel() []string {
	return []string{
		string(ServiceLevelFlexible),
		string(ServiceLevelPremium),
		string(ServiceLevelStandard),
		string(ServiceLevelStandardZRS),
		string(ServiceLevelUltra),
	}
}

// UnmarshalJSON decodes a JSON string into a ServiceLevel, re-casing known values.
func (s *ServiceLevel) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseServiceLevel(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseServiceLevel matches input case-insensitively against the known values;
// unknown input is passed through so newer API values still decode.
func parseServiceLevel(input string) (*ServiceLevel, error) {
	known := map[string]ServiceLevel{
		"flexible":    ServiceLevelFlexible,
		"premium":     ServiceLevelPremium,
		"standard":    ServiceLevelStandard,
		"standardzrs": ServiceLevelStandardZRS,
		"ultra":       ServiceLevelUltra,
	}
	if match, ok := known[strings.ToLower(input)]; ok {
		return &match, nil
	}

	passthrough := ServiceLevel(input)
	return &passthrough, nil
}
func init() {
	// NOTE(review): presumably registers this ID type so API responses with
	// non-standard casing can be re-cased centrally — confirm against the
	// recaser package docs.
	recaser.RegisterResourceId(&CapacityPoolId{})
}

// Compile-time check that CapacityPoolId satisfies resourceids.ResourceId.
var _ resourceids.ResourceId = &CapacityPoolId{}

// CapacityPoolId is a struct representing the Resource ID for a Capacity Pool
type CapacityPoolId struct {
	SubscriptionId    string
	ResourceGroupName string
	NetAppAccountName string
	CapacityPoolName  string
}

// NewCapacityPoolID returns a new CapacityPoolId struct
func NewCapacityPoolID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string) CapacityPoolId {
	return CapacityPoolId{
		SubscriptionId:    subscriptionId,
		ResourceGroupName: resourceGroupName,
		NetAppAccountName: netAppAccountName,
		CapacityPoolName:  capacityPoolName,
	}
}

// ParseCapacityPoolID parses 'input' into a CapacityPoolId
func ParseCapacityPoolID(input string) (*CapacityPoolId, error) {
	parser := resourceids.NewParserFromResourceIdType(&CapacityPoolId{})
	// Strict (case-sensitive) parse: use this for user-supplied IDs.
	parsed, err := parser.Parse(input, false)
	if err != nil {
		return nil, fmt.Errorf("parsing %q: %+v", input, err)
	}

	id := CapacityPoolId{}
	if err = id.FromParseResult(*parsed); err != nil {
		return nil, err
	}

	return &id, nil
}

// ParseCapacityPoolIDInsensitively parses 'input' case-insensitively into a CapacityPoolId
// note: this method should only be used for API response data and not user input
func ParseCapacityPoolIDInsensitively(input string) (*CapacityPoolId, error) {
	parser := resourceids.NewParserFromResourceIdType(&CapacityPoolId{})
	// Insensitive parse: tolerates the casing drift seen in API responses.
	parsed, err := parser.Parse(input, true)
	if err != nil {
		return nil, fmt.Errorf("parsing %q: %+v", input, err)
	}

	id := CapacityPoolId{}
	if err = id.FromParseResult(*parsed); err != nil {
		return nil, err
	}

	return &id, nil
}

// FromParseResult populates the struct from parser output; every user-specified
// segment must be present or a SegmentNotSpecified error is returned.
func (id *CapacityPoolId) FromParseResult(input resourceids.ParseResult) error {
	var ok bool

	if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input)
	}

	if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input)
	}

	if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input)
	}

	if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input)
	}

	return nil
}

// ValidateCapacityPoolID checks that 'input' can be parsed as a Capacity Pool ID
func ValidateCapacityPoolID(input interface{}, key string) (warnings []string, errors []error) {
	v, ok := input.(string)
	if !ok {
		errors = append(errors, fmt.Errorf("expected %q to be a string", key))
		return
	}

	if _, err := ParseCapacityPoolID(v); err != nil {
		errors = append(errors, err)
	}

	return
}

// ID returns the formatted Capacity Pool ID
func (id CapacityPoolId) ID() string {
	fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s"
	return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName)
}

// Segments returns a slice of Resource ID Segments which comprise this Capacity Pool ID
// The order here defines the grammar the parser accepts — keep in sync with ID().
func (id CapacityPoolId) Segments() []resourceids.Segment {
	return []resourceids.Segment{
		resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"),
		resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"),
		resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"),
		resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"),
		resourceids.StaticSegment("staticProviders", "providers", "providers"),
		resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"),
		resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"),
		resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"),
		resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"),
		resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"),
	}
}

// String returns a human-readable description of this Capacity Pool ID
func (id CapacityPoolId) String() string {
	components := []string{
		fmt.Sprintf("Subscription: %q", id.SubscriptionId),
		fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName),
		fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName),
		fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName),
	}
	return fmt.Sprintf("Capacity Pool (%s)", strings.Join(components, "\n"))
}

// --- id_capacitypool_test.go ---
package capacitypools

import (
	"testing"

	"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +var _ resourceids.ResourceId = &CapacityPoolId{} + +func TestNewCapacityPoolID(t *testing.T) { + id := NewCapacityPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } +} + +func TestFormatCapacityPoolID(t *testing.T) { + actual := NewCapacityPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseCapacityPoolID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *CapacityPoolId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Expected: &CapacityPoolId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseCapacityPoolID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if 
v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + } +} + +func TestParseCapacityPoolIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *CapacityPoolId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + 
Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Expected: &CapacityPoolId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Expected: &CapacityPoolId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseCapacityPoolIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + 
if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + } +} + +func TestSegmentsForCapacityPoolId(t *testing.T) { + segments := CapacityPoolId{}.Segments() + if len(segments) == 0 { + t.Fatalf("CapacityPoolId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/capacitypools/id_netappaccount.go b/resource-manager/netapp/2025-06-01/capacitypools/id_netappaccount.go new file mode 100644 index 00000000000..58a09e2952e --- /dev/null +++ b/resource-manager/netapp/2025-06-01/capacitypools/id_netappaccount.go @@ -0,0 +1,130 @@ +package capacitypools + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&NetAppAccountId{}) +} + +var _ resourceids.ResourceId = &NetAppAccountId{} + +// NetAppAccountId is a struct representing the Resource ID for a Net App Account +type NetAppAccountId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string +} + +// NewNetAppAccountID returns a new NetAppAccountId struct +func NewNetAppAccountID(subscriptionId string, resourceGroupName string, netAppAccountName string) NetAppAccountId { + return NetAppAccountId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + } +} + +// ParseNetAppAccountID parses 'input' into a NetAppAccountId +func ParseNetAppAccountID(input string) (*NetAppAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseNetAppAccountIDInsensitively parses 'input' case-insensitively into a NetAppAccountId +// note: this method should only be used for API response data and not user input +func ParseNetAppAccountIDInsensitively(input string) (*NetAppAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *NetAppAccountId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; 
!ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + return nil +} + +// ValidateNetAppAccountID checks that 'input' can be parsed as a Net App Account ID +func ValidateNetAppAccountID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseNetAppAccountID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Net App Account ID +func (id NetAppAccountId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Net App Account ID +func (id NetAppAccountId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + } +} + +// String returns a human-readable description of this Net App Account ID +func (id NetAppAccountId) String() string { + components 
:= []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + } + return fmt.Sprintf("Net App Account (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/capacitypools/id_netappaccount_test.go b/resource-manager/netapp/2025-06-01/capacitypools/id_netappaccount_test.go new file mode 100644 index 00000000000..91d9bae2f9f --- /dev/null +++ b/resource-manager/netapp/2025-06-01/capacitypools/id_netappaccount_test.go @@ -0,0 +1,282 @@ +package capacitypools + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &NetAppAccountId{} + +func TestNewNetAppAccountID(t *testing.T) { + id := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } +} + +func TestFormatNetAppAccountID(t *testing.T) { + actual := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName" + if 
actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseNetAppAccountID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountID(v.Input) + if err 
!= nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestParseNetAppAccountIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Expected: &NetAppAccountId{ + SubscriptionId: 
"12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestSegmentsForNetAppAccountId(t *testing.T) { + segments := NetAppAccountId{}.Segments() + if len(segments) == 0 { + t.Fatalf("NetAppAccountId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/capacitypools/method_poolscreateorupdate.go b/resource-manager/netapp/2025-06-01/capacitypools/method_poolscreateorupdate.go new file mode 100644 index 00000000000..2083a6d9987 --- /dev/null 
+++ b/resource-manager/netapp/2025-06-01/capacitypools/method_poolscreateorupdate.go @@ -0,0 +1,75 @@ +package capacitypools + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PoolsCreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *CapacityPool +} + +// PoolsCreateOrUpdate ... +func (c CapacityPoolsClient) PoolsCreateOrUpdate(ctx context.Context, id CapacityPoolId, input CapacityPool) (result PoolsCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// PoolsCreateOrUpdateThenPoll performs PoolsCreateOrUpdate then polls until it's completed +func (c CapacityPoolsClient) PoolsCreateOrUpdateThenPoll(ctx context.Context, id CapacityPoolId, input CapacityPool) error { + result, err := c.PoolsCreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing PoolsCreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return 
fmt.Errorf("polling after PoolsCreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/capacitypools/method_poolsdelete.go b/resource-manager/netapp/2025-06-01/capacitypools/method_poolsdelete.go new file mode 100644 index 00000000000..36c9e0e6bcf --- /dev/null +++ b/resource-manager/netapp/2025-06-01/capacitypools/method_poolsdelete.go @@ -0,0 +1,70 @@ +package capacitypools + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PoolsDeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// PoolsDelete ... +func (c CapacityPoolsClient) PoolsDelete(ctx context.Context, id CapacityPoolId) (result PoolsDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// PoolsDeleteThenPoll performs PoolsDelete then polls until it's completed +func (c CapacityPoolsClient) PoolsDeleteThenPoll(ctx context.Context, id CapacityPoolId) error { + result, err := c.PoolsDelete(ctx, id) + if err != nil { + return fmt.Errorf("performing 
PoolsDelete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after PoolsDelete: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/capacitypools/method_poolsget.go b/resource-manager/netapp/2025-06-01/capacitypools/method_poolsget.go new file mode 100644 index 00000000000..f9d5fcc8531 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/capacitypools/method_poolsget.go @@ -0,0 +1,53 @@ +package capacitypools + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PoolsGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *CapacityPool +} + +// PoolsGet ... +func (c CapacityPoolsClient) PoolsGet(ctx context.Context, id CapacityPoolId) (result PoolsGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model CapacityPool + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/capacitypools/method_poolslist.go b/resource-manager/netapp/2025-06-01/capacitypools/method_poolslist.go new file mode 100644 index 00000000000..2fe683d61de --- /dev/null +++ b/resource-manager/netapp/2025-06-01/capacitypools/method_poolslist.go @@ -0,0 +1,105 @@ +package capacitypools + +import ( + 
"context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PoolsListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]CapacityPool +} + +type PoolsListCompleteResult struct { + LatestHttpResponse *http.Response + Items []CapacityPool +} + +type PoolsListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *PoolsListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// PoolsList ... +func (c CapacityPoolsClient) PoolsList(ctx context.Context, id NetAppAccountId) (result PoolsListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &PoolsListCustomPager{}, + Path: fmt.Sprintf("%s/capacityPools", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]CapacityPool `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// PoolsListComplete retrieves all the results into a single object +func (c CapacityPoolsClient) PoolsListComplete(ctx context.Context, id NetAppAccountId) (PoolsListCompleteResult, error) { + return c.PoolsListCompleteMatchingPredicate(ctx, id, CapacityPoolOperationPredicate{}) +} + +// PoolsListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c CapacityPoolsClient) 
PoolsListCompleteMatchingPredicate(ctx context.Context, id NetAppAccountId, predicate CapacityPoolOperationPredicate) (result PoolsListCompleteResult, err error) { + items := make([]CapacityPool, 0) + + resp, err := c.PoolsList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = PoolsListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/netapp/2025-06-01/capacitypools/method_poolsupdate.go b/resource-manager/netapp/2025-06-01/capacitypools/method_poolsupdate.go new file mode 100644 index 00000000000..df2c44337fd --- /dev/null +++ b/resource-manager/netapp/2025-06-01/capacitypools/method_poolsupdate.go @@ -0,0 +1,75 @@ +package capacitypools + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PoolsUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *CapacityPool +} + +// PoolsUpdate ... 
+func (c CapacityPoolsClient) PoolsUpdate(ctx context.Context, id CapacityPoolId, input CapacityPoolPatch) (result PoolsUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// PoolsUpdateThenPoll performs PoolsUpdate then polls until it's completed +func (c CapacityPoolsClient) PoolsUpdateThenPoll(ctx context.Context, id CapacityPoolId, input CapacityPoolPatch) error { + result, err := c.PoolsUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing PoolsUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after PoolsUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/capacitypools/model_capacitypool.go b/resource-manager/netapp/2025-06-01/capacitypools/model_capacitypool.go new file mode 100644 index 00000000000..f5d6f1ea86d --- /dev/null +++ b/resource-manager/netapp/2025-06-01/capacitypools/model_capacitypool.go @@ -0,0 +1,19 @@ +package capacitypools + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// CapacityPool models a Microsoft.NetApp capacity pool resource as exchanged
// with the Resource Manager API. Location and Properties carry no omitempty
// tag and are always serialized; all other fields are optional pointers.
type CapacityPool struct {
	// Etag is an opaque string returned by the service; presumably used for
	// optimistic concurrency — TODO confirm against the API reference.
	Etag       *string                `json:"etag,omitempty"`
	Id         *string                `json:"id,omitempty"`
	Location   string                 `json:"location"`
	Name       *string                `json:"name,omitempty"`
	Properties PoolProperties         `json:"properties"`
	SystemData *systemdata.SystemData `json:"systemData,omitempty"`
	Tags       *map[string]string     `json:"tags,omitempty"`
	Type       *string                `json:"type,omitempty"`
}
+ +type PoolPatchProperties struct { + CoolAccess *bool `json:"coolAccess,omitempty"` + CustomThroughputMibps *float64 `json:"customThroughputMibps,omitempty"` + QosType *QosType `json:"qosType,omitempty"` + Size *int64 `json:"size,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/capacitypools/model_poolproperties.go b/resource-manager/netapp/2025-06-01/capacitypools/model_poolproperties.go new file mode 100644 index 00000000000..2c9bb9eecb5 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/capacitypools/model_poolproperties.go @@ -0,0 +1,17 @@ +package capacitypools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PoolProperties struct { + CoolAccess *bool `json:"coolAccess,omitempty"` + CustomThroughputMibps *float64 `json:"customThroughputMibps,omitempty"` + EncryptionType *EncryptionType `json:"encryptionType,omitempty"` + PoolId *string `json:"poolId,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + QosType *QosType `json:"qosType,omitempty"` + ServiceLevel ServiceLevel `json:"serviceLevel"` + Size int64 `json:"size"` + TotalThroughputMibps *float64 `json:"totalThroughputMibps,omitempty"` + UtilizedThroughputMibps *float64 `json:"utilizedThroughputMibps,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/capacitypools/predicates.go b/resource-manager/netapp/2025-06-01/capacitypools/predicates.go new file mode 100644 index 00000000000..386a7986d42 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/capacitypools/predicates.go @@ -0,0 +1,37 @@ +package capacitypools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CapacityPoolOperationPredicate struct { + Etag *string + Id *string + Location *string + Name *string + Type *string +} + +func (p CapacityPoolOperationPredicate) Matches(input CapacityPool) bool { + + if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Location != nil && *p.Location != input.Location { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/netapp/2025-06-01/capacitypools/version.go b/resource-manager/netapp/2025-06-01/capacitypools/version.go new file mode 100644 index 00000000000..33331f23942 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/capacitypools/version.go @@ -0,0 +1,10 @@ +package capacitypools + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/capacitypools/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/client.go b/resource-manager/netapp/2025-06-01/client.go new file mode 100644 index 00000000000..5572d39fd6b --- /dev/null +++ b/resource-manager/netapp/2025-06-01/client.go @@ -0,0 +1,235 @@ +package v2025_06_01 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/backuppolicy" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/backups" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/backupvaults" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/capacitypools" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/filelocks" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/groupidlistforldapuser" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/netappaccounts" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/netappresource" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/poolchange" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/resetcifspassword" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/restore" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/snapshotpolicy" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/snapshots" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/splitclonevolume" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/subvolumes" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumegroups" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumequotarules" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumes" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumesonpremmigration" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumesrelocation" + 
"github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumesreplication" + "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumesrevert" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +type Client struct { + BackupPolicy *backuppolicy.BackupPolicyClient + BackupVaults *backupvaults.BackupVaultsClient + Backups *backups.BackupsClient + CapacityPools *capacitypools.CapacityPoolsClient + FileLocks *filelocks.FileLocksClient + GroupIdListForLDAPUser *groupidlistforldapuser.GroupIdListForLDAPUserClient + NetAppAccounts *netappaccounts.NetAppAccountsClient + NetAppResource *netappresource.NetAppResourceClient + PoolChange *poolchange.PoolChangeClient + ResetCifsPassword *resetcifspassword.ResetCifsPasswordClient + Restore *restore.RestoreClient + SnapshotPolicy *snapshotpolicy.SnapshotPolicyClient + SnapshotPolicyListVolumes *snapshotpolicylistvolumes.SnapshotPolicyListVolumesClient + Snapshots *snapshots.SnapshotsClient + SplitCloneVolume *splitclonevolume.SplitCloneVolumeClient + SubVolumes *subvolumes.SubVolumesClient + VolumeGroups *volumegroups.VolumeGroupsClient + VolumeQuotaRules *volumequotarules.VolumeQuotaRulesClient + Volumes *volumes.VolumesClient + VolumesOnPremMigration *volumesonpremmigration.VolumesOnPremMigrationClient + VolumesOnPremMigrationFinalize *volumesonpremmigrationfinalize.VolumesOnPremMigrationFinalizeClient + VolumesRelocation *volumesrelocation.VolumesRelocationClient + VolumesReplication *volumesreplication.VolumesReplicationClient + VolumesRevert *volumesrevert.VolumesRevertClient +} + +func NewClientWithBaseURI(sdkApi sdkEnv.Api, configureFunc func(c *resourcemanager.Client)) (*Client, error) { + backupPolicyClient, err := backuppolicy.NewBackupPolicyClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building BackupPolicy client: %+v", err) + } + configureFunc(backupPolicyClient.Client) + + 
backupVaultsClient, err := backupvaults.NewBackupVaultsClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building BackupVaults client: %+v", err) + } + configureFunc(backupVaultsClient.Client) + + backupsClient, err := backups.NewBackupsClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building Backups client: %+v", err) + } + configureFunc(backupsClient.Client) + + capacityPoolsClient, err := capacitypools.NewCapacityPoolsClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building CapacityPools client: %+v", err) + } + configureFunc(capacityPoolsClient.Client) + + fileLocksClient, err := filelocks.NewFileLocksClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building FileLocks client: %+v", err) + } + configureFunc(fileLocksClient.Client) + + groupIdListForLDAPUserClient, err := groupidlistforldapuser.NewGroupIdListForLDAPUserClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building GroupIdListForLDAPUser client: %+v", err) + } + configureFunc(groupIdListForLDAPUserClient.Client) + + netAppAccountsClient, err := netappaccounts.NewNetAppAccountsClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building NetAppAccounts client: %+v", err) + } + configureFunc(netAppAccountsClient.Client) + + netAppResourceClient, err := netappresource.NewNetAppResourceClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building NetAppResource client: %+v", err) + } + configureFunc(netAppResourceClient.Client) + + poolChangeClient, err := poolchange.NewPoolChangeClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building PoolChange client: %+v", err) + } + configureFunc(poolChangeClient.Client) + + resetCifsPasswordClient, err := resetcifspassword.NewResetCifsPasswordClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building ResetCifsPassword client: %+v", err) + } + configureFunc(resetCifsPasswordClient.Client) 
+ + restoreClient, err := restore.NewRestoreClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building Restore client: %+v", err) + } + configureFunc(restoreClient.Client) + + snapshotPolicyClient, err := snapshotpolicy.NewSnapshotPolicyClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building SnapshotPolicy client: %+v", err) + } + configureFunc(snapshotPolicyClient.Client) + + snapshotPolicyListVolumesClient, err := snapshotpolicylistvolumes.NewSnapshotPolicyListVolumesClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building SnapshotPolicyListVolumes client: %+v", err) + } + configureFunc(snapshotPolicyListVolumesClient.Client) + + snapshotsClient, err := snapshots.NewSnapshotsClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building Snapshots client: %+v", err) + } + configureFunc(snapshotsClient.Client) + + splitCloneVolumeClient, err := splitclonevolume.NewSplitCloneVolumeClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building SplitCloneVolume client: %+v", err) + } + configureFunc(splitCloneVolumeClient.Client) + + subVolumesClient, err := subvolumes.NewSubVolumesClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building SubVolumes client: %+v", err) + } + configureFunc(subVolumesClient.Client) + + volumeGroupsClient, err := volumegroups.NewVolumeGroupsClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building VolumeGroups client: %+v", err) + } + configureFunc(volumeGroupsClient.Client) + + volumeQuotaRulesClient, err := volumequotarules.NewVolumeQuotaRulesClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building VolumeQuotaRules client: %+v", err) + } + configureFunc(volumeQuotaRulesClient.Client) + + volumesClient, err := volumes.NewVolumesClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building Volumes client: %+v", err) + } + configureFunc(volumesClient.Client) + + 
volumesOnPremMigrationClient, err := volumesonpremmigration.NewVolumesOnPremMigrationClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building VolumesOnPremMigration client: %+v", err) + } + configureFunc(volumesOnPremMigrationClient.Client) + + volumesOnPremMigrationFinalizeClient, err := volumesonpremmigrationfinalize.NewVolumesOnPremMigrationFinalizeClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building VolumesOnPremMigrationFinalize client: %+v", err) + } + configureFunc(volumesOnPremMigrationFinalizeClient.Client) + + volumesRelocationClient, err := volumesrelocation.NewVolumesRelocationClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building VolumesRelocation client: %+v", err) + } + configureFunc(volumesRelocationClient.Client) + + volumesReplicationClient, err := volumesreplication.NewVolumesReplicationClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building VolumesReplication client: %+v", err) + } + configureFunc(volumesReplicationClient.Client) + + volumesRevertClient, err := volumesrevert.NewVolumesRevertClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building VolumesRevert client: %+v", err) + } + configureFunc(volumesRevertClient.Client) + + return &Client{ + BackupPolicy: backupPolicyClient, + BackupVaults: backupVaultsClient, + Backups: backupsClient, + CapacityPools: capacityPoolsClient, + FileLocks: fileLocksClient, + GroupIdListForLDAPUser: groupIdListForLDAPUserClient, + NetAppAccounts: netAppAccountsClient, + NetAppResource: netAppResourceClient, + PoolChange: poolChangeClient, + ResetCifsPassword: resetCifsPasswordClient, + Restore: restoreClient, + SnapshotPolicy: snapshotPolicyClient, + SnapshotPolicyListVolumes: snapshotPolicyListVolumesClient, + Snapshots: snapshotsClient, + SplitCloneVolume: splitCloneVolumeClient, + SubVolumes: subVolumesClient, + VolumeGroups: volumeGroupsClient, + VolumeQuotaRules: volumeQuotaRulesClient, + 
Volumes: volumesClient, + VolumesOnPremMigration: volumesOnPremMigrationClient, + VolumesOnPremMigrationFinalize: volumesOnPremMigrationFinalizeClient, + VolumesRelocation: volumesRelocationClient, + VolumesReplication: volumesReplicationClient, + VolumesRevert: volumesRevertClient, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/filelocks/README.md b/resource-manager/netapp/2025-06-01/filelocks/README.md new file mode 100644 index 00000000000..22d43da76c4 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/filelocks/README.md @@ -0,0 +1,37 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/filelocks` Documentation + +The `filelocks` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/filelocks" +``` + + +### Client Initialization + +```go +client := filelocks.NewFileLocksClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `FileLocksClient.VolumesBreakFileLocks` + +```go +ctx := context.TODO() +id := filelocks.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +payload := filelocks.BreakFileLocksRequest{ + // ... 
+} + + +if err := client.VolumesBreakFileLocksThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/filelocks/client.go b/resource-manager/netapp/2025-06-01/filelocks/client.go new file mode 100644 index 00000000000..56c8b4fc903 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/filelocks/client.go @@ -0,0 +1,26 @@ +package filelocks + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type FileLocksClient struct { + Client *resourcemanager.Client +} + +func NewFileLocksClientWithBaseURI(sdkApi sdkEnv.Api) (*FileLocksClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "filelocks", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating FileLocksClient: %+v", err) + } + + return &FileLocksClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/filelocks/id_volume.go b/resource-manager/netapp/2025-06-01/filelocks/id_volume.go new file mode 100644 index 00000000000..660324eb77b --- /dev/null +++ b/resource-manager/netapp/2025-06-01/filelocks/id_volume.go @@ -0,0 +1,148 @@ +package filelocks + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + 
return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + 
resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/filelocks/id_volume_test.go b/resource-manager/netapp/2025-06-01/filelocks/id_volume_test.go new file mode 100644 index 00000000000..1ae0d4861ac --- /dev/null +++ b/resource-manager/netapp/2025-06-01/filelocks/id_volume_test.go @@ -0,0 +1,372 @@ +package filelocks + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId)
+		}
+
+		if actual.ResourceGroupName != v.Expected.ResourceGroupName {
+			t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName)
+		}
+
+		if actual.NetAppAccountName != v.Expected.NetAppAccountName {
+			t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName)
+		}
+
+		if actual.CapacityPoolName != v.Expected.CapacityPoolName {
+			t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName)
+		}
+
+		if actual.VolumeName != v.Expected.VolumeName {
+			t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName)
+		}
+
+	}
+}
+
+func TestSegmentsForVolumeId(t *testing.T) {
+	segments := VolumeId{}.Segments()
+	if len(segments) == 0 {
+		t.Fatalf("VolumeId has no segments")
+	}
+
+	uniqueNames := make(map[string]struct{}, 0)
+	for _, segment := range segments {
+		uniqueNames[segment.Name] = struct{}{}
+	}
+	if len(uniqueNames) != len(segments) {
+		t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments))
+	}
+}
diff --git a/resource-manager/netapp/2025-06-01/filelocks/method_volumesbreakfilelocks.go b/resource-manager/netapp/2025-06-01/filelocks/method_volumesbreakfilelocks.go
new file mode 100644
index 00000000000..c1ff2b4b86f
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/filelocks/method_volumesbreakfilelocks.go
@@ -0,0 +1,74 @@
+package filelocks
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/hashicorp/go-azure-sdk/sdk/client"
+	"github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
+	"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
+	"github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type VolumesBreakFileLocksOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesBreakFileLocks ... +func (c FileLocksClient) VolumesBreakFileLocks(ctx context.Context, id VolumeId, input BreakFileLocksRequest) (result VolumesBreakFileLocksOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/breakFileLocks", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesBreakFileLocksThenPoll performs VolumesBreakFileLocks then polls until it's completed +func (c FileLocksClient) VolumesBreakFileLocksThenPoll(ctx context.Context, id VolumeId, input BreakFileLocksRequest) error { + result, err := c.VolumesBreakFileLocks(ctx, id, input) + if err != nil { + return fmt.Errorf("performing VolumesBreakFileLocks: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesBreakFileLocks: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/filelocks/model_breakfilelocksrequest.go b/resource-manager/netapp/2025-06-01/filelocks/model_breakfilelocksrequest.go new file mode 100644 index 00000000000..62fa9fc9808 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/filelocks/model_breakfilelocksrequest.go @@ -0,0 +1,9 @@ +package filelocks + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BreakFileLocksRequest struct { + ClientIP *string `json:"clientIp,omitempty"` + ConfirmRunningDisruptiveOperation *bool `json:"confirmRunningDisruptiveOperation,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/filelocks/version.go b/resource-manager/netapp/2025-06-01/filelocks/version.go new file mode 100644 index 00000000000..51769b90cae --- /dev/null +++ b/resource-manager/netapp/2025-06-01/filelocks/version.go @@ -0,0 +1,10 @@ +package filelocks + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/filelocks/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/groupidlistforldapuser/README.md b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/README.md new file mode 100644 index 00000000000..1196a33e8ce --- /dev/null +++ b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/README.md @@ -0,0 +1,37 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/groupidlistforldapuser` Documentation + +The `groupidlistforldapuser` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/groupidlistforldapuser" +``` + + +### Client Initialization + +```go +client := groupidlistforldapuser.NewGroupIdListForLDAPUserClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `GroupIdListForLDAPUserClient.VolumesListGetGroupIdListForLdapUser` + +```go +ctx := context.TODO() +id := groupidlistforldapuser.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +payload := groupidlistforldapuser.GetGroupIdListForLDAPUserRequest{ + // ... +} + + +if err := client.VolumesListGetGroupIdListForLdapUserThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/groupidlistforldapuser/client.go b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/client.go new file mode 100644 index 00000000000..b098cba0063 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/client.go @@ -0,0 +1,26 @@ +package groupidlistforldapuser + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GroupIdListForLDAPUserClient struct { + Client *resourcemanager.Client +} + +func NewGroupIdListForLDAPUserClientWithBaseURI(sdkApi sdkEnv.Api) (*GroupIdListForLDAPUserClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "groupidlistforldapuser", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating GroupIdListForLDAPUserClient: %+v", err) + } + + return &GroupIdListForLDAPUserClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/groupidlistforldapuser/id_volume.go b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/id_volume.go new file mode 100644 index 00000000000..597c3e852ab --- /dev/null +++ b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/id_volume.go @@ -0,0 +1,148 @@ +package groupidlistforldapuser + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + 
return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + 
resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/groupidlistforldapuser/id_volume_test.go b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/id_volume_test.go new file mode 100644 index 00000000000..ad956e12e12 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/id_volume_test.go @@ -0,0 +1,372 @@ +package groupidlistforldapuser + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+
+var _ resourceids.ResourceId = &VolumeId{}
+
+func TestNewVolumeID(t *testing.T) {
+	id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName")
+
+	if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" {
+		t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", "12345678-1234-9876-4563-123456789012", id.SubscriptionId)
+	}
+
+	if id.ResourceGroupName != "example-resource-group" {
+		t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", "example-resource-group", id.ResourceGroupName)
+	}
+
+	if id.NetAppAccountName != "netAppAccountName" {
+		t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", "netAppAccountName", id.NetAppAccountName)
+	}
+
+	if id.CapacityPoolName != "capacityPoolName" {
+		t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", "capacityPoolName", id.CapacityPoolName)
+	}
+
+	if id.VolumeName != "volumeName" {
+		t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", "volumeName", id.VolumeName)
+	}
+}
+
+func TestFormatVolumeID(t *testing.T) {
+	actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID()
+	expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName"
+	if actual != expected {
+		t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual)
+	}
+}
+
+func TestParseVolumeID(t *testing.T) {
+	testData := []struct {
+		Input    string
+		Error    bool
+		Expected *VolumeId
+	}{
+		{
+			// Incomplete URI
+			Input: "",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: "/subscriptions/12345678-1234-9876-4563-123456789012",
+			Error: true,
+		},
+		{
+			// Incomplete URI
+			Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId)
+		}
+
+		if actual.ResourceGroupName != v.Expected.ResourceGroupName {
+			t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName)
+		}
+
+		if actual.NetAppAccountName != v.Expected.NetAppAccountName {
+			t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName)
+		}
+
+		if actual.CapacityPoolName != v.Expected.CapacityPoolName {
+			t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName)
+		}
+
+		if actual.VolumeName != v.Expected.VolumeName {
+			t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName)
+		}
+
+	}
+}
+
+func TestSegmentsForVolumeId(t *testing.T) {
+	segments := VolumeId{}.Segments()
+	if len(segments) == 0 {
+		t.Fatalf("VolumeId has no segments")
+	}
+
+	uniqueNames := make(map[string]struct{}, 0)
+	for _, segment := range segments {
+		uniqueNames[segment.Name] = struct{}{}
+	}
+	if len(uniqueNames) != len(segments) {
+		t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments))
+	}
+}
diff --git a/resource-manager/netapp/2025-06-01/groupidlistforldapuser/method_volumeslistgetgroupidlistforldapuser.go b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/method_volumeslistgetgroupidlistforldapuser.go
new file mode 100644
index 00000000000..9c2248727d2
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/method_volumeslistgetgroupidlistforldapuser.go
@@ -0,0 +1,75 @@
+package groupidlistforldapuser
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/hashicorp/go-azure-sdk/sdk/client"
+	"github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
+	"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
+	"github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesListGetGroupIdListForLdapUserOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *GetGroupIdListForLDAPUserResponse +} + +// VolumesListGetGroupIdListForLdapUser ... +func (c GroupIdListForLDAPUserClient) VolumesListGetGroupIdListForLdapUser(ctx context.Context, id VolumeId, input GetGroupIdListForLDAPUserRequest) (result VolumesListGetGroupIdListForLdapUserOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/getGroupIdListForLdapUser", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesListGetGroupIdListForLdapUserThenPoll performs VolumesListGetGroupIdListForLdapUser then polls until it's completed +func (c GroupIdListForLDAPUserClient) VolumesListGetGroupIdListForLdapUserThenPoll(ctx context.Context, id VolumeId, input GetGroupIdListForLDAPUserRequest) error { + result, err := c.VolumesListGetGroupIdListForLdapUser(ctx, id, input) + if err != nil { + return fmt.Errorf("performing VolumesListGetGroupIdListForLdapUser: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesListGetGroupIdListForLdapUser: %+v", err) + } + + return nil +} diff --git 
a/resource-manager/netapp/2025-06-01/groupidlistforldapuser/model_getgroupidlistforldapuserrequest.go b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/model_getgroupidlistforldapuserrequest.go new file mode 100644 index 00000000000..797ecd9aa2a --- /dev/null +++ b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/model_getgroupidlistforldapuserrequest.go @@ -0,0 +1,8 @@ +package groupidlistforldapuser + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetGroupIdListForLDAPUserRequest struct { + Username string `json:"username"` +} diff --git a/resource-manager/netapp/2025-06-01/groupidlistforldapuser/model_getgroupidlistforldapuserresponse.go b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/model_getgroupidlistforldapuserresponse.go new file mode 100644 index 00000000000..42c528df2f2 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/model_getgroupidlistforldapuserresponse.go @@ -0,0 +1,8 @@ +package groupidlistforldapuser + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetGroupIdListForLDAPUserResponse struct { + GroupIdsForLdapUser *[]string `json:"groupIdsForLdapUser,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/groupidlistforldapuser/version.go b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/version.go new file mode 100644 index 00000000000..0dec5d02bf9 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/groupidlistforldapuser/version.go @@ -0,0 +1,10 @@ +package groupidlistforldapuser + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/groupidlistforldapuser/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/netappaccounts/README.md b/resource-manager/netapp/2025-06-01/netappaccounts/README.md new file mode 100644 index 00000000000..6e542225047 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappaccounts/README.md @@ -0,0 +1,175 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/netappaccounts` Documentation + +The `netappaccounts` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/netappaccounts" +``` + + +### Client Initialization + +```go +client := netappaccounts.NewNetAppAccountsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `NetAppAccountsClient.AccountsChangeKeyVault` + +```go +ctx := context.TODO() +id := netappaccounts.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +payload := netappaccounts.ChangeKeyVault{ + // ... +} + + +if err := client.AccountsChangeKeyVaultThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `NetAppAccountsClient.AccountsCreateOrUpdate` + +```go +ctx := context.TODO() +id := netappaccounts.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +payload := netappaccounts.NetAppAccount{ + // ... 
+} + + +if err := client.AccountsCreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `NetAppAccountsClient.AccountsDelete` + +```go +ctx := context.TODO() +id := netappaccounts.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +if err := client.AccountsDeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `NetAppAccountsClient.AccountsGet` + +```go +ctx := context.TODO() +id := netappaccounts.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +read, err := client.AccountsGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `NetAppAccountsClient.AccountsGetChangeKeyVaultInformation` + +```go +ctx := context.TODO() +id := netappaccounts.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +if err := client.AccountsGetChangeKeyVaultInformationThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `NetAppAccountsClient.AccountsList` + +```go +ctx := context.TODO() +id := commonids.NewResourceGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group") + +// alternatively `client.AccountsList(ctx, id)` can be used to do batched pagination +items, err := client.AccountsListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `NetAppAccountsClient.AccountsListBySubscription` + +```go +ctx := context.TODO() +id := commonids.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.AccountsListBySubscription(ctx, id)` can be used to do batched pagination +items, err := client.AccountsListBySubscriptionComplete(ctx, id) +if err != nil { + 
// handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `NetAppAccountsClient.AccountsRenewCredentials` + +```go +ctx := context.TODO() +id := netappaccounts.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +if err := client.AccountsRenewCredentialsThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `NetAppAccountsClient.AccountsTransitionToCmk` + +```go +ctx := context.TODO() +id := netappaccounts.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +payload := netappaccounts.EncryptionTransitionRequest{ + // ... +} + + +if err := client.AccountsTransitionToCmkThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `NetAppAccountsClient.AccountsUpdate` + +```go +ctx := context.TODO() +id := netappaccounts.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +payload := netappaccounts.NetAppAccountPatch{ + // ... +} + + +if err := client.AccountsUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/netappaccounts/client.go b/resource-manager/netapp/2025-06-01/netappaccounts/client.go new file mode 100644 index 00000000000..240204954a3 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappaccounts/client.go @@ -0,0 +1,26 @@ +package netappaccounts + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type NetAppAccountsClient struct { + Client *resourcemanager.Client +} + +func NewNetAppAccountsClientWithBaseURI(sdkApi sdkEnv.Api) (*NetAppAccountsClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "netappaccounts", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating NetAppAccountsClient: %+v", err) + } + + return &NetAppAccountsClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/netappaccounts/constants.go b/resource-manager/netapp/2025-06-01/netappaccounts/constants.go new file mode 100644 index 00000000000..4c753e2c4e5 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappaccounts/constants.go @@ -0,0 +1,192 @@ +package netappaccounts + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ActiveDirectoryStatus string + +const ( + ActiveDirectoryStatusCreated ActiveDirectoryStatus = "Created" + ActiveDirectoryStatusDeleted ActiveDirectoryStatus = "Deleted" + ActiveDirectoryStatusError ActiveDirectoryStatus = "Error" + ActiveDirectoryStatusInUse ActiveDirectoryStatus = "InUse" + ActiveDirectoryStatusUpdating ActiveDirectoryStatus = "Updating" +) + +func PossibleValuesForActiveDirectoryStatus() []string { + return []string{ + string(ActiveDirectoryStatusCreated), + string(ActiveDirectoryStatusDeleted), + string(ActiveDirectoryStatusError), + string(ActiveDirectoryStatusInUse), + string(ActiveDirectoryStatusUpdating), + } +} + +func (s *ActiveDirectoryStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseActiveDirectoryStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseActiveDirectoryStatus(input 
string) (*ActiveDirectoryStatus, error) { + vals := map[string]ActiveDirectoryStatus{ + "created": ActiveDirectoryStatusCreated, + "deleted": ActiveDirectoryStatusDeleted, + "error": ActiveDirectoryStatusError, + "inuse": ActiveDirectoryStatusInUse, + "updating": ActiveDirectoryStatusUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ActiveDirectoryStatus(input) + return &out, nil +} + +type KeySource string + +const ( + KeySourceMicrosoftPointKeyVault KeySource = "Microsoft.KeyVault" + KeySourceMicrosoftPointNetApp KeySource = "Microsoft.NetApp" +) + +func PossibleValuesForKeySource() []string { + return []string{ + string(KeySourceMicrosoftPointKeyVault), + string(KeySourceMicrosoftPointNetApp), + } +} + +func (s *KeySource) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseKeySource(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseKeySource(input string) (*KeySource, error) { + vals := map[string]KeySource{ + "microsoft.keyvault": KeySourceMicrosoftPointKeyVault, + "microsoft.netapp": KeySourceMicrosoftPointNetApp, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KeySource(input) + return &out, nil +} + +type KeyVaultStatus string + +const ( + KeyVaultStatusCreated KeyVaultStatus = "Created" + KeyVaultStatusDeleted KeyVaultStatus = "Deleted" + KeyVaultStatusError KeyVaultStatus = "Error" + KeyVaultStatusInUse KeyVaultStatus = "InUse" + KeyVaultStatusUpdating KeyVaultStatus = "Updating" +) + +func PossibleValuesForKeyVaultStatus() []string { + return []string{ + string(KeyVaultStatusCreated), + string(KeyVaultStatusDeleted), + 
string(KeyVaultStatusError), + string(KeyVaultStatusInUse), + string(KeyVaultStatusUpdating), + } +} + +func (s *KeyVaultStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseKeyVaultStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseKeyVaultStatus(input string) (*KeyVaultStatus, error) { + vals := map[string]KeyVaultStatus{ + "created": KeyVaultStatusCreated, + "deleted": KeyVaultStatusDeleted, + "error": KeyVaultStatusError, + "inuse": KeyVaultStatusInUse, + "updating": KeyVaultStatusUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := KeyVaultStatus(input) + return &out, nil +} + +type MultiAdStatus string + +const ( + MultiAdStatusDisabled MultiAdStatus = "Disabled" + MultiAdStatusEnabled MultiAdStatus = "Enabled" +) + +func PossibleValuesForMultiAdStatus() []string { + return []string{ + string(MultiAdStatusDisabled), + string(MultiAdStatusEnabled), + } +} + +func (s *MultiAdStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMultiAdStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMultiAdStatus(input string) (*MultiAdStatus, error) { + vals := map[string]MultiAdStatus{ + "disabled": MultiAdStatusDisabled, + "enabled": MultiAdStatusEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MultiAdStatus(input) + return &out, nil +} diff --git a/resource-manager/netapp/2025-06-01/netappaccounts/id_netappaccount.go 
b/resource-manager/netapp/2025-06-01/netappaccounts/id_netappaccount.go new file mode 100644 index 00000000000..e49b1665e3b --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappaccounts/id_netappaccount.go @@ -0,0 +1,130 @@ +package netappaccounts + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&NetAppAccountId{}) +} + +var _ resourceids.ResourceId = &NetAppAccountId{} + +// NetAppAccountId is a struct representing the Resource ID for a Net App Account +type NetAppAccountId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string +} + +// NewNetAppAccountID returns a new NetAppAccountId struct +func NewNetAppAccountID(subscriptionId string, resourceGroupName string, netAppAccountName string) NetAppAccountId { + return NetAppAccountId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + } +} + +// ParseNetAppAccountID parses 'input' into a NetAppAccountId +func ParseNetAppAccountID(input string) (*NetAppAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseNetAppAccountIDInsensitively parses 'input' case-insensitively into a NetAppAccountId +// note: this method should only be used for API response data and not user input +func ParseNetAppAccountIDInsensitively(input string) (*NetAppAccountId, error) { + parser := 
resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *NetAppAccountId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + return nil +} + +// ValidateNetAppAccountID checks that 'input' can be parsed as a Net App Account ID +func ValidateNetAppAccountID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseNetAppAccountID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Net App Account ID +func (id NetAppAccountId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Net App Account ID +func (id NetAppAccountId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + 
resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + } +} + +// String returns a human-readable description of this Net App Account ID +func (id NetAppAccountId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + } + return fmt.Sprintf("Net App Account (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/netappaccounts/id_netappaccount_test.go b/resource-manager/netapp/2025-06-01/netappaccounts/id_netappaccount_test.go new file mode 100644 index 00000000000..ef0ce3e4636 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappaccounts/id_netappaccount_test.go @@ -0,0 +1,282 @@ +package netappaccounts + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &NetAppAccountId{} + +func TestNewNetAppAccountID(t *testing.T) { + id := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } +} + +func TestFormatNetAppAccountID(t *testing.T) { + actual := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseNetAppAccountID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: 
true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestParseNetAppAccountIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + 
Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: 
true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, 
actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestSegmentsForNetAppAccountId(t *testing.T) { + segments := NetAppAccountId{}.Segments() + if len(segments) == 0 { + t.Fatalf("NetAppAccountId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/netappaccounts/method_accountschangekeyvault.go b/resource-manager/netapp/2025-06-01/netappaccounts/method_accountschangekeyvault.go new file mode 100644 index 00000000000..6f6c8bb44e7 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappaccounts/method_accountschangekeyvault.go @@ -0,0 +1,73 @@ +package netappaccounts + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AccountsChangeKeyVaultOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// AccountsChangeKeyVault ... 
+func (c NetAppAccountsClient) AccountsChangeKeyVault(ctx context.Context, id NetAppAccountId, input ChangeKeyVault) (result AccountsChangeKeyVaultOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/changeKeyVault", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// AccountsChangeKeyVaultThenPoll performs AccountsChangeKeyVault then polls until it's completed +func (c NetAppAccountsClient) AccountsChangeKeyVaultThenPoll(ctx context.Context, id NetAppAccountId, input ChangeKeyVault) error { + result, err := c.AccountsChangeKeyVault(ctx, id, input) + if err != nil { + return fmt.Errorf("performing AccountsChangeKeyVault: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after AccountsChangeKeyVault: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/netappaccounts/method_accountscreateorupdate.go b/resource-manager/netapp/2025-06-01/netappaccounts/method_accountscreateorupdate.go new file mode 100644 index 00000000000..42add8d5ac1 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappaccounts/method_accountscreateorupdate.go @@ -0,0 +1,75 @@ +package netappaccounts + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + 
"github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AccountsCreateOrUpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *NetAppAccount +} + +// AccountsCreateOrUpdate ... +func (c NetAppAccountsClient) AccountsCreateOrUpdate(ctx context.Context, id NetAppAccountId, input NetAppAccount) (result AccountsCreateOrUpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// AccountsCreateOrUpdateThenPoll performs AccountsCreateOrUpdate then polls until it's completed +func (c NetAppAccountsClient) AccountsCreateOrUpdateThenPoll(ctx context.Context, id NetAppAccountId, input NetAppAccount) error { + result, err := c.AccountsCreateOrUpdate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing AccountsCreateOrUpdate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after AccountsCreateOrUpdate: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/netappaccounts/method_accountsdelete.go b/resource-manager/netapp/2025-06-01/netappaccounts/method_accountsdelete.go new file mode 100644 index 00000000000..ae46b7b061d --- /dev/null +++ 
b/resource-manager/netapp/2025-06-01/netappaccounts/method_accountsdelete.go @@ -0,0 +1,70 @@ +package netappaccounts + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AccountsDeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// AccountsDelete ... +func (c NetAppAccountsClient) AccountsDelete(ctx context.Context, id NetAppAccountId) (result AccountsDeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// AccountsDeleteThenPoll performs AccountsDelete then polls until it's completed +func (c NetAppAccountsClient) AccountsDeleteThenPoll(ctx context.Context, id NetAppAccountId) error { + result, err := c.AccountsDelete(ctx, id) + if err != nil { + return fmt.Errorf("performing AccountsDelete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after AccountsDelete: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/netappaccounts/method_accountsget.go 
b/resource-manager/netapp/2025-06-01/netappaccounts/method_accountsget.go new file mode 100644 index 00000000000..c1ec9504bf9 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappaccounts/method_accountsget.go @@ -0,0 +1,53 @@ +package netappaccounts + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AccountsGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *NetAppAccount +} + +// AccountsGet ... +func (c NetAppAccountsClient) AccountsGet(ctx context.Context, id NetAppAccountId) (result AccountsGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model NetAppAccount + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/netappaccounts/method_accountsgetchangekeyvaultinformation.go b/resource-manager/netapp/2025-06-01/netappaccounts/method_accountsgetchangekeyvaultinformation.go new file mode 100644 index 00000000000..965d2d08ce3 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappaccounts/method_accountsgetchangekeyvaultinformation.go @@ -0,0 +1,71 @@ +package netappaccounts + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AccountsGetChangeKeyVaultInformationOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *GetKeyVaultStatusResponse +} + +// AccountsGetChangeKeyVaultInformation ... +func (c NetAppAccountsClient) AccountsGetChangeKeyVaultInformation(ctx context.Context, id NetAppAccountId) (result AccountsGetChangeKeyVaultInformationOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/getKeyVaultStatus", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// AccountsGetChangeKeyVaultInformationThenPoll performs AccountsGetChangeKeyVaultInformation then polls until it's completed +func (c NetAppAccountsClient) AccountsGetChangeKeyVaultInformationThenPoll(ctx context.Context, id NetAppAccountId) error { + result, err := c.AccountsGetChangeKeyVaultInformation(ctx, id) + if err != nil { + return fmt.Errorf("performing AccountsGetChangeKeyVaultInformation: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after AccountsGetChangeKeyVaultInformation: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/netappaccounts/method_accountslist.go 
// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/method_accountslist.go ----

package netappaccounts

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-helpers/resourcemanager/commonids"
	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// AccountsListOperationResponse is the per-page response envelope for AccountsList.
type AccountsListOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *[]NetAppAccount
}

// AccountsListCompleteResult aggregates every page of AccountsList into a single slice.
type AccountsListCompleteResult struct {
	LatestHttpResponse *http.Response
	Items              []NetAppAccount
}

// AccountsListCustomPager extracts the `nextLink` field used to fetch the next page.
type AccountsListCustomPager struct {
	NextLink *odata.Link `json:"nextLink"`
}

// NextPageLink returns the link to the next page and clears it so the same
// link is never followed twice.
func (p *AccountsListCustomPager) NextPageLink() *odata.Link {
	defer func() {
		p.NextLink = nil
	}()

	return p.NextLink
}

// AccountsList returns one page of NetApp accounts within the given resource
// group; pagination is driven by the custom pager above.
func (c NetAppAccountsClient) AccountsList(ctx context.Context, id commonids.ResourceGroupId) (result AccountsListOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusOK,
		},
		HttpMethod: http.MethodGet,
		Pager:      &AccountsListCustomPager{},
		Path:       fmt.Sprintf("%s/providers/Microsoft.NetApp/netAppAccounts", id.ID()),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.ExecutePaged(ctx)
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	// The ARM list payload wraps the items in a `value` array.
	var values struct {
		Values *[]NetAppAccount `json:"value"`
	}
	if err = resp.Unmarshal(&values); err != nil {
		return
	}

	result.Model = values.Values

	return
}

// AccountsListComplete retrieves all the results into a single object
func (c NetAppAccountsClient) AccountsListComplete(ctx context.Context, id commonids.ResourceGroupId) (AccountsListCompleteResult, error) {
	return c.AccountsListCompleteMatchingPredicate(ctx, id, NetAppAccountOperationPredicate{})
}

// AccountsListCompleteMatchingPredicate retrieves all the results and then applies the predicate
func (c NetAppAccountsClient) AccountsListCompleteMatchingPredicate(ctx context.Context, id commonids.ResourceGroupId, predicate NetAppAccountOperationPredicate) (result AccountsListCompleteResult, err error) {
	items := make([]NetAppAccount, 0)

	resp, err := c.AccountsList(ctx, id)
	if err != nil {
		result.LatestHttpResponse = resp.HttpResponse
		err = fmt.Errorf("loading results: %+v", err)
		return
	}
	if resp.Model != nil {
		for _, v := range *resp.Model {
			if predicate.Matches(v) {
				items = append(items, v)
			}
		}
	}

	result = AccountsListCompleteResult{
		LatestHttpResponse: resp.HttpResponse,
		Items:              items,
	}
	return
}
// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/method_accountslistbysubscription.go ----

package netappaccounts

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-helpers/resourcemanager/commonids"
	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// AccountsListBySubscriptionOperationResponse is the per-page response envelope
// for AccountsListBySubscription.
type AccountsListBySubscriptionOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *[]NetAppAccount
}

// AccountsListBySubscriptionCompleteResult aggregates every page into a single slice.
type AccountsListBySubscriptionCompleteResult struct {
	LatestHttpResponse *http.Response
	Items              []NetAppAccount
}

// AccountsListBySubscriptionCustomPager extracts the `nextLink` used to fetch the next page.
type AccountsListBySubscriptionCustomPager struct {
	NextLink *odata.Link `json:"nextLink"`
}

// NextPageLink returns the link to the next page and clears it so the same
// link is never followed twice.
func (p *AccountsListBySubscriptionCustomPager) NextPageLink() *odata.Link {
	defer func() {
		p.NextLink = nil
	}()

	return p.NextLink
}

// AccountsListBySubscription returns one page of NetApp accounts within the
// given subscription; pagination is driven by the custom pager above.
func (c NetAppAccountsClient) AccountsListBySubscription(ctx context.Context, id commonids.SubscriptionId) (result AccountsListBySubscriptionOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusOK,
		},
		HttpMethod: http.MethodGet,
		Pager:      &AccountsListBySubscriptionCustomPager{},
		Path:       fmt.Sprintf("%s/providers/Microsoft.NetApp/netAppAccounts", id.ID()),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.ExecutePaged(ctx)
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	// The ARM list payload wraps the items in a `value` array.
	var values struct {
		Values *[]NetAppAccount `json:"value"`
	}
	if err = resp.Unmarshal(&values); err != nil {
		return
	}

	result.Model = values.Values

	return
}

// AccountsListBySubscriptionComplete retrieves all the results into a single object
func (c NetAppAccountsClient) AccountsListBySubscriptionComplete(ctx context.Context, id commonids.SubscriptionId) (AccountsListBySubscriptionCompleteResult, error) {
	return c.AccountsListBySubscriptionCompleteMatchingPredicate(ctx, id, NetAppAccountOperationPredicate{})
}

// AccountsListBySubscriptionCompleteMatchingPredicate retrieves all the results and then applies the predicate
func (c NetAppAccountsClient) AccountsListBySubscriptionCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate NetAppAccountOperationPredicate) (result AccountsListBySubscriptionCompleteResult, err error) {
	items := make([]NetAppAccount, 0)

	resp, err := c.AccountsListBySubscription(ctx, id)
	if err != nil {
		result.LatestHttpResponse = resp.HttpResponse
		err = fmt.Errorf("loading results: %+v", err)
		return
	}
	if resp.Model != nil {
		for _, v := range *resp.Model {
			if predicate.Matches(v) {
				items = append(items, v)
			}
		}
	}

	result = AccountsListBySubscriptionCompleteResult{
		LatestHttpResponse: resp.HttpResponse,
		Items:              items,
	}
	return
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/method_accountsrenewcredentials.go (head) ----

package netappaccounts

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
	"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// AccountsRenewCredentialsOperationResponse is the response envelope for
// AccountsRenewCredentials; the operation is long-running, so a Poller is returned.
type AccountsRenewCredentialsOperationResponse struct {
	Poller       pollers.Poller
	HttpResponse *http.Response
	OData        *odata.OData
}
// AccountsRenewCredentials starts the long-running POST to
// `{accountId}/renewCredentials` and returns a poller for tracking it.
// Use AccountsRenewCredentialsThenPoll to block until the operation finishes.
func (c NetAppAccountsClient) AccountsRenewCredentials(ctx context.Context, id NetAppAccountId) (result AccountsRenewCredentialsOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusAccepted,
			http.StatusOK,
		},
		HttpMethod: http.MethodPost,
		Path:       fmt.Sprintf("%s/renewCredentials", id.ID()),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		// Populate the response/OData even on error so callers can inspect status codes.
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	// Build an ARM long-running-operation poller from the 200/202 response.
	result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client)
	if err != nil {
		return
	}

	return
}

// AccountsRenewCredentialsThenPoll performs AccountsRenewCredentials then polls until it's completed
func (c NetAppAccountsClient) AccountsRenewCredentialsThenPoll(ctx context.Context, id NetAppAccountId) error {
	result, err := c.AccountsRenewCredentials(ctx, id)
	if err != nil {
		return fmt.Errorf("performing AccountsRenewCredentials: %+v", err)
	}

	if err := result.Poller.PollUntilDone(ctx); err != nil {
		return fmt.Errorf("polling after AccountsRenewCredentials: %+v", err)
	}

	return nil
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/method_accountstransitiontocmk.go ----

package netappaccounts

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
	"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// AccountsTransitionToCmkOperationResponse is the response envelope for
// AccountsTransitionToCmk; the operation is long-running, so a Poller is returned.
type AccountsTransitionToCmkOperationResponse struct {
	Poller       pollers.Poller
	HttpResponse *http.Response
	OData        *odata.OData
}

// AccountsTransitionToCmk starts the long-running POST to
// `{accountId}/transitiontocmk` with the supplied EncryptionTransitionRequest
// payload. NOTE: only a 202 Accepted is treated as success here (no 200), per
// the generated API specification.
func (c NetAppAccountsClient) AccountsTransitionToCmk(ctx context.Context, id NetAppAccountId, input EncryptionTransitionRequest) (result AccountsTransitionToCmkOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusAccepted,
		},
		HttpMethod: http.MethodPost,
		Path:       fmt.Sprintf("%s/transitiontocmk", id.ID()),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	// Serialize the request payload into the request body.
	if err = req.Marshal(input); err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client)
	if err != nil {
		return
	}

	return
}

// AccountsTransitionToCmkThenPoll performs AccountsTransitionToCmk then polls until it's completed
func (c NetAppAccountsClient) AccountsTransitionToCmkThenPoll(ctx context.Context, id NetAppAccountId, input EncryptionTransitionRequest) error {
	result, err := c.AccountsTransitionToCmk(ctx, id, input)
	if err != nil {
		return fmt.Errorf("performing AccountsTransitionToCmk: %+v", err)
	}

	if err := result.Poller.PollUntilDone(ctx); err != nil {
		return fmt.Errorf("polling after AccountsTransitionToCmk: %+v", err)
	}

	return nil
}
// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/method_accountsupdate.go ----

package netappaccounts

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
	"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// AccountsUpdateOperationResponse is the response envelope for AccountsUpdate;
// the operation is long-running, so a Poller is returned alongside the model.
type AccountsUpdateOperationResponse struct {
	Poller       pollers.Poller
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *NetAppAccount
}

// AccountsUpdate starts the long-running PATCH of a NetApp account with the
// supplied NetAppAccountPatch payload. Use AccountsUpdateThenPoll to block
// until the update completes.
func (c NetAppAccountsClient) AccountsUpdate(ctx context.Context, id NetAppAccountId, input NetAppAccountPatch) (result AccountsUpdateOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusAccepted,
			http.StatusOK,
		},
		HttpMethod: http.MethodPatch,
		Path:       id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	// Serialize the patch payload into the request body.
	if err = req.Marshal(input); err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	// Build an ARM long-running-operation poller from the 200/202 response.
	result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client)
	if err != nil {
		return
	}

	return
}

// AccountsUpdateThenPoll performs AccountsUpdate then polls until it's completed
func (c NetAppAccountsClient) AccountsUpdateThenPoll(ctx context.Context, id NetAppAccountId, input NetAppAccountPatch) error {
	result, err := c.AccountsUpdate(ctx, id, input)
	if err != nil {
		return fmt.Errorf("performing AccountsUpdate: %+v", err)
	}

	if err := result.Poller.PollUntilDone(ctx); err != nil {
		return fmt.Errorf("polling after AccountsUpdate: %+v", err)
	}

	return nil
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/model_accountencryption.go ----

package netappaccounts

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// AccountEncryption describes the encryption settings of a NetApp account:
// the key source, the managed identity used to access the key, and (for
// customer-managed keys) the Key Vault holding the key.
type AccountEncryption struct {
	Identity           *EncryptionIdentity `json:"identity,omitempty"`
	KeySource          *KeySource          `json:"keySource,omitempty"`
	KeyVaultProperties *KeyVaultProperties `json:"keyVaultProperties,omitempty"`
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/model_accountproperties.go (head) ----

package netappaccounts

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// AccountProperties holds the configurable and read-only properties of a
// NetApp account. ProvisioningState is set by the service and is read-only.
type AccountProperties struct {
	ActiveDirectories *[]ActiveDirectory `json:"activeDirectories,omitempty"`
	DisableShowmount  *bool              `json:"disableShowmount,omitempty"`
	Encryption        *AccountEncryption `json:"encryption,omitempty"`
	MultiAdStatus     *MultiAdStatus     `json:"multiAdStatus,omitempty"`
	NfsV4IDDomain     *string            `json:"nfsV4IDDomain,omitempty"`
	ProvisioningState *string            `json:"provisioningState,omitempty"`
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/model_activedirectory.go ----

package netappaccounts

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ActiveDirectory describes an Active Directory connection for SMB/LDAP on a
// NetApp account. Status and StatusDetails are reported by the service.
type ActiveDirectory struct {
	ActiveDirectoryId             *string               `json:"activeDirectoryId,omitempty"`
	AdName                        *string               `json:"adName,omitempty"`
	Administrators                *[]string             `json:"administrators,omitempty"`
	AesEncryption                 *bool                 `json:"aesEncryption,omitempty"`
	AllowLocalNfsUsersWithLdap    *bool                 `json:"allowLocalNfsUsersWithLdap,omitempty"`
	BackupOperators               *[]string             `json:"backupOperators,omitempty"`
	Dns                           *string               `json:"dns,omitempty"`
	Domain                        *string               `json:"domain,omitempty"`
	EncryptDCConnections          *bool                 `json:"encryptDCConnections,omitempty"`
	KdcIP                         *string               `json:"kdcIP,omitempty"`
	LdapOverTLS                   *bool                 `json:"ldapOverTLS,omitempty"`
	LdapSearchScope               *LdapSearchScopeOpt   `json:"ldapSearchScope,omitempty"`
	LdapSigning                   *bool                 `json:"ldapSigning,omitempty"`
	OrganizationalUnit            *string               `json:"organizationalUnit,omitempty"`
	Password                      *string               `json:"password,omitempty"`
	PreferredServersForLdapClient *string               `json:"preferredServersForLdapClient,omitempty"`
	SecurityOperators             *[]string             `json:"securityOperators,omitempty"`
	ServerRootCACertificate       *string               `json:"serverRootCACertificate,omitempty"`
	Site                          *string               `json:"site,omitempty"`
	SmbServerName                 *string               `json:"smbServerName,omitempty"`
	Status                        *ActiveDirectoryStatus `json:"status,omitempty"`
	StatusDetails                 *string               `json:"statusDetails,omitempty"`
	Username                      *string               `json:"username,omitempty"`
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/model_changekeyvault.go ----

package netappaccounts

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ChangeKeyVault is the request payload for moving an account's encryption key
// to a different Key Vault. KeyName, KeyVaultPrivateEndpoints and KeyVaultUri
// are required (no omitempty); KeyVaultResourceId is optional.
type ChangeKeyVault struct {
	KeyName                  string                    `json:"keyName"`
	KeyVaultPrivateEndpoints []KeyVaultPrivateEndpoint `json:"keyVaultPrivateEndpoints"`
	KeyVaultResourceId       *string                   `json:"keyVaultResourceId,omitempty"`
	KeyVaultUri              string                    `json:"keyVaultUri"`
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/model_encryptionidentity.go (head) ----

package netappaccounts

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// EncryptionIdentity identifies the managed identity used to access the
// encryption key. PrincipalId is populated by the service.
type EncryptionIdentity struct {
	FederatedClientId    *string `json:"federatedClientId,omitempty"`
	PrincipalId          *string `json:"principalId,omitempty"`
	UserAssignedIdentity *string `json:"userAssignedIdentity,omitempty"`
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/model_encryptiontransitionrequest.go ----

package netappaccounts

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// EncryptionTransitionRequest is the payload for AccountsTransitionToCmk:
// both the private endpoint and virtual network IDs are required.
type EncryptionTransitionRequest struct {
	PrivateEndpointId string `json:"privateEndpointId"`
	VirtualNetworkId  string `json:"virtualNetworkId"`
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/model_getkeyvaultstatusresponse.go ----

package netappaccounts

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// GetKeyVaultStatusResponse wraps the properties returned by the
// getKeyVaultStatus operation (see AccountsGetChangeKeyVaultInformation).
type GetKeyVaultStatusResponse struct {
	Properties *GetKeyVaultStatusResponseProperties `json:"properties,omitempty"`
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/model_getkeyvaultstatusresponseproperties.go ----

package netappaccounts

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// GetKeyVaultStatusResponseProperties describes the Key Vault currently backing
// an account's encryption, as reported by getKeyVaultStatus.
type GetKeyVaultStatusResponseProperties struct {
	KeyName                  *string                    `json:"keyName,omitempty"`
	KeyVaultPrivateEndpoints *[]KeyVaultPrivateEndpoint `json:"keyVaultPrivateEndpoints,omitempty"`
	KeyVaultResourceId       *string                    `json:"keyVaultResourceId,omitempty"`
	KeyVaultUri              *string                    `json:"keyVaultUri,omitempty"`
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/model_keyvaultprivateendpoint.go ----

package netappaccounts

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// KeyVaultPrivateEndpoint pairs a private endpoint with the virtual network
// from which the Key Vault is reached.
type KeyVaultPrivateEndpoint struct {
	PrivateEndpointId *string `json:"privateEndpointId,omitempty"`
	VirtualNetworkId  *string `json:"virtualNetworkId,omitempty"`
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/model_keyvaultproperties.go ----

package netappaccounts

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// KeyVaultProperties identifies the customer-managed key in Key Vault.
// KeyName and KeyVaultUri are required (no omitempty); Status is reported by
// the service.
type KeyVaultProperties struct {
	KeyName            string          `json:"keyName"`
	KeyVaultId         *string         `json:"keyVaultId,omitempty"`
	KeyVaultResourceId *string         `json:"keyVaultResourceId,omitempty"`
	KeyVaultUri        string          `json:"keyVaultUri"`
	Status             *KeyVaultStatus `json:"status,omitempty"`
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/model_ldapsearchscopeopt.go (head) ----

package netappaccounts

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
// LdapSearchScopeOpt narrows LDAP searches to specific user/group DNs and an
// optional group membership filter.
type LdapSearchScopeOpt struct {
	GroupDN               *string `json:"groupDN,omitempty"`
	GroupMembershipFilter *string `json:"groupMembershipFilter,omitempty"`
	UserDN                *string `json:"userDN,omitempty"`
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/model_netappaccount.go ----

package netappaccounts

import (
	"github.com/hashicorp/go-azure-helpers/resourcemanager/identity"
	"github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// NetAppAccount is the full ARM representation of a NetApp account resource.
// Location is required (no omitempty); Id, Name, Type, Etag and SystemData are
// populated by the service.
type NetAppAccount struct {
	Etag       *string                                  `json:"etag,omitempty"`
	Id         *string                                  `json:"id,omitempty"`
	Identity   *identity.LegacySystemAndUserAssignedMap `json:"identity,omitempty"`
	Location   string                                   `json:"location"`
	Name       *string                                  `json:"name,omitempty"`
	Properties *AccountProperties                       `json:"properties,omitempty"`
	SystemData *systemdata.SystemData                   `json:"systemData,omitempty"`
	Tags       *map[string]string                       `json:"tags,omitempty"`
	Type       *string                                  `json:"type,omitempty"`
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/model_netappaccountpatch.go ----

package netappaccounts

import (
	"github.com/hashicorp/go-azure-helpers/resourcemanager/identity"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// NetAppAccountPatch is the partial-update payload for AccountsUpdate; every
// field is optional so only supplied values are patched.
type NetAppAccountPatch struct {
	Id         *string                                  `json:"id,omitempty"`
	Identity   *identity.LegacySystemAndUserAssignedMap `json:"identity,omitempty"`
	Location   *string                                  `json:"location,omitempty"`
	Name       *string                                  `json:"name,omitempty"`
	Properties *AccountProperties                       `json:"properties,omitempty"`
	Tags       *map[string]string                       `json:"tags,omitempty"`
	Type       *string                                  `json:"type,omitempty"`
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/predicates.go ----

package netappaccounts

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// NetAppAccountOperationPredicate filters NetAppAccount list results: a nil
// field matches anything, a non-nil field must equal the account's value.
type NetAppAccountOperationPredicate struct {
	Etag     *string
	Id       *string
	Location *string
	Name     *string
	Type     *string
}

// Matches reports whether the account satisfies every non-nil field of the
// predicate. A nil account field never matches a non-nil predicate field.
func (p NetAppAccountOperationPredicate) Matches(input NetAppAccount) bool {

	if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) {
		return false
	}

	if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) {
		return false
	}

	// Location is a non-pointer field, so it is compared directly.
	if p.Location != nil && *p.Location != input.Location {
		return false
	}

	if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) {
		return false
	}

	if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) {
		return false
	}

	return true
}

// ---- file: resource-manager/netapp/2025-06-01/netappaccounts/version.go (head) ----

package netappaccounts

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/netappaccounts/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/README.md b/resource-manager/netapp/2025-06-01/netappresource/README.md new file mode 100644 index 00000000000..38e4305ad98 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/README.md @@ -0,0 +1,236 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/netappresource` Documentation + +The `netappresource` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/netappresource" +``` + + +### Client Initialization + +```go +client := netappresource.NewNetAppResourceClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `NetAppResourceClient.CheckFilePathAvailability` + +```go +ctx := context.TODO() +id := netappresource.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +payload := netappresource.FilePathAvailabilityRequest{ + // ... +} + + +read, err := client.CheckFilePathAvailability(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `NetAppResourceClient.CheckNameAvailability` + +```go +ctx := context.TODO() +id := netappresource.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +payload := netappresource.ResourceNameAvailabilityRequest{ + // ... 
+} + + +read, err := client.CheckNameAvailability(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `NetAppResourceClient.CheckQuotaAvailability` + +```go +ctx := context.TODO() +id := netappresource.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +payload := netappresource.QuotaAvailabilityRequest{ + // ... +} + + +read, err := client.CheckQuotaAvailability(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `NetAppResourceClient.QueryNetworkSiblingSet` + +```go +ctx := context.TODO() +id := netappresource.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +payload := netappresource.QueryNetworkSiblingSetRequest{ + // ... +} + + +read, err := client.QueryNetworkSiblingSet(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `NetAppResourceClient.QueryRegionInfo` + +```go +ctx := context.TODO() +id := netappresource.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +read, err := client.QueryRegionInfo(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `NetAppResourceClient.QuotaLimitsGet` + +```go +ctx := context.TODO() +id := netappresource.NewQuotaLimitID("12345678-1234-9876-4563-123456789012", "locationName", "quotaLimitName") + +read, err := client.QuotaLimitsGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `NetAppResourceClient.QuotaLimitsList` + +```go +ctx := 
context.TODO() +id := netappresource.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +// alternatively `client.QuotaLimitsList(ctx, id)` can be used to do batched pagination +items, err := client.QuotaLimitsListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `NetAppResourceClient.RegionInfosGet` + +```go +ctx := context.TODO() +id := netappresource.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +read, err := client.RegionInfosGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `NetAppResourceClient.RegionInfosList` + +```go +ctx := context.TODO() +id := netappresource.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +// alternatively `client.RegionInfosList(ctx, id)` can be used to do batched pagination +items, err := client.RegionInfosListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `NetAppResourceClient.UpdateNetworkSiblingSet` + +```go +ctx := context.TODO() +id := netappresource.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +payload := netappresource.UpdateNetworkSiblingSetRequest{ + // ... 
+} + + +if err := client.UpdateNetworkSiblingSetThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `NetAppResourceClient.UsagesGet` + +```go +ctx := context.TODO() +id := netappresource.NewUsageID("12345678-1234-9876-4563-123456789012", "locationName", "usageName") + +read, err := client.UsagesGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `NetAppResourceClient.UsagesList` + +```go +ctx := context.TODO() +id := netappresource.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +// alternatively `client.UsagesList(ctx, id)` can be used to do batched pagination +items, err := client.UsagesListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/resource-manager/netapp/2025-06-01/netappresource/client.go b/resource-manager/netapp/2025-06-01/netappresource/client.go new file mode 100644 index 00000000000..0bfc3f358ce --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/client.go @@ -0,0 +1,26 @@ +package netappresource + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type NetAppResourceClient struct { + Client *resourcemanager.Client +} + +func NewNetAppResourceClientWithBaseURI(sdkApi sdkEnv.Api) (*NetAppResourceClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "netappresource", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating NetAppResourceClient: %+v", err) + } + + return &NetAppResourceClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/constants.go b/resource-manager/netapp/2025-06-01/netappresource/constants.go new file mode 100644 index 00000000000..5f4e36630e6 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/constants.go @@ -0,0 +1,298 @@ +package netappresource + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CheckNameResourceTypes string + +const ( + CheckNameResourceTypesMicrosoftPointNetAppNetAppAccounts CheckNameResourceTypes = "Microsoft.NetApp/netAppAccounts" + CheckNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPools CheckNameResourceTypes = "Microsoft.NetApp/netAppAccounts/capacityPools" + CheckNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPoolsVolumes CheckNameResourceTypes = "Microsoft.NetApp/netAppAccounts/capacityPools/volumes" + CheckNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPoolsVolumesSnapshots CheckNameResourceTypes = "Microsoft.NetApp/netAppAccounts/capacityPools/volumes/snapshots" +) + +func PossibleValuesForCheckNameResourceTypes() []string { + return []string{ + string(CheckNameResourceTypesMicrosoftPointNetAppNetAppAccounts), + string(CheckNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPools), + string(CheckNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPoolsVolumes), + 
string(CheckNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPoolsVolumesSnapshots), + } +} + +func (s *CheckNameResourceTypes) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCheckNameResourceTypes(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCheckNameResourceTypes(input string) (*CheckNameResourceTypes, error) { + vals := map[string]CheckNameResourceTypes{ + "microsoft.netapp/netappaccounts": CheckNameResourceTypesMicrosoftPointNetAppNetAppAccounts, + "microsoft.netapp/netappaccounts/capacitypools": CheckNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPools, + "microsoft.netapp/netappaccounts/capacitypools/volumes": CheckNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPoolsVolumes, + "microsoft.netapp/netappaccounts/capacitypools/volumes/snapshots": CheckNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPoolsVolumesSnapshots, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CheckNameResourceTypes(input) + return &out, nil +} + +type CheckQuotaNameResourceTypes string + +const ( + CheckQuotaNameResourceTypesMicrosoftPointNetAppNetAppAccounts CheckQuotaNameResourceTypes = "Microsoft.NetApp/netAppAccounts" + CheckQuotaNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPools CheckQuotaNameResourceTypes = "Microsoft.NetApp/netAppAccounts/capacityPools" + CheckQuotaNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPoolsVolumes CheckQuotaNameResourceTypes = "Microsoft.NetApp/netAppAccounts/capacityPools/volumes" + CheckQuotaNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPoolsVolumesSnapshots CheckQuotaNameResourceTypes = "Microsoft.NetApp/netAppAccounts/capacityPools/volumes/snapshots" +) 
+ +func PossibleValuesForCheckQuotaNameResourceTypes() []string { + return []string{ + string(CheckQuotaNameResourceTypesMicrosoftPointNetAppNetAppAccounts), + string(CheckQuotaNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPools), + string(CheckQuotaNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPoolsVolumes), + string(CheckQuotaNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPoolsVolumesSnapshots), + } +} + +func (s *CheckQuotaNameResourceTypes) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCheckQuotaNameResourceTypes(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCheckQuotaNameResourceTypes(input string) (*CheckQuotaNameResourceTypes, error) { + vals := map[string]CheckQuotaNameResourceTypes{ + "microsoft.netapp/netappaccounts": CheckQuotaNameResourceTypesMicrosoftPointNetAppNetAppAccounts, + "microsoft.netapp/netappaccounts/capacitypools": CheckQuotaNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPools, + "microsoft.netapp/netappaccounts/capacitypools/volumes": CheckQuotaNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPoolsVolumes, + "microsoft.netapp/netappaccounts/capacitypools/volumes/snapshots": CheckQuotaNameResourceTypesMicrosoftPointNetAppNetAppAccountsCapacityPoolsVolumesSnapshots, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CheckQuotaNameResourceTypes(input) + return &out, nil +} + +type InAvailabilityReasonType string + +const ( + InAvailabilityReasonTypeAlreadyExists InAvailabilityReasonType = "AlreadyExists" + InAvailabilityReasonTypeInvalid InAvailabilityReasonType = "Invalid" +) + +func PossibleValuesForInAvailabilityReasonType() []string { + return []string{ + 
string(InAvailabilityReasonTypeAlreadyExists), + string(InAvailabilityReasonTypeInvalid), + } +} + +func (s *InAvailabilityReasonType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseInAvailabilityReasonType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseInAvailabilityReasonType(input string) (*InAvailabilityReasonType, error) { + vals := map[string]InAvailabilityReasonType{ + "alreadyexists": InAvailabilityReasonTypeAlreadyExists, + "invalid": InAvailabilityReasonTypeInvalid, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := InAvailabilityReasonType(input) + return &out, nil +} + +type NetworkFeatures string + +const ( + NetworkFeaturesBasic NetworkFeatures = "Basic" + NetworkFeaturesBasicStandard NetworkFeatures = "Basic_Standard" + NetworkFeaturesStandard NetworkFeatures = "Standard" + NetworkFeaturesStandardBasic NetworkFeatures = "Standard_Basic" +) + +func PossibleValuesForNetworkFeatures() []string { + return []string{ + string(NetworkFeaturesBasic), + string(NetworkFeaturesBasicStandard), + string(NetworkFeaturesStandard), + string(NetworkFeaturesStandardBasic), + } +} + +func (s *NetworkFeatures) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNetworkFeatures(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseNetworkFeatures(input string) (*NetworkFeatures, error) { + vals := map[string]NetworkFeatures{ + "basic": NetworkFeaturesBasic, + "basic_standard": NetworkFeaturesBasicStandard, + "standard": NetworkFeaturesStandard, + 
"standard_basic": NetworkFeaturesStandardBasic, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NetworkFeatures(input) + return &out, nil +} + +type NetworkSiblingSetProvisioningState string + +const ( + NetworkSiblingSetProvisioningStateCanceled NetworkSiblingSetProvisioningState = "Canceled" + NetworkSiblingSetProvisioningStateFailed NetworkSiblingSetProvisioningState = "Failed" + NetworkSiblingSetProvisioningStateSucceeded NetworkSiblingSetProvisioningState = "Succeeded" + NetworkSiblingSetProvisioningStateUpdating NetworkSiblingSetProvisioningState = "Updating" +) + +func PossibleValuesForNetworkSiblingSetProvisioningState() []string { + return []string{ + string(NetworkSiblingSetProvisioningStateCanceled), + string(NetworkSiblingSetProvisioningStateFailed), + string(NetworkSiblingSetProvisioningStateSucceeded), + string(NetworkSiblingSetProvisioningStateUpdating), + } +} + +func (s *NetworkSiblingSetProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNetworkSiblingSetProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseNetworkSiblingSetProvisioningState(input string) (*NetworkSiblingSetProvisioningState, error) { + vals := map[string]NetworkSiblingSetProvisioningState{ + "canceled": NetworkSiblingSetProvisioningStateCanceled, + "failed": NetworkSiblingSetProvisioningStateFailed, + "succeeded": NetworkSiblingSetProvisioningStateSucceeded, + "updating": NetworkSiblingSetProvisioningStateUpdating, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NetworkSiblingSetProvisioningState(input) + return &out, nil +} + +type 
RegionStorageToNetworkProximity string + +const ( + RegionStorageToNetworkProximityAcrossTTwo RegionStorageToNetworkProximity = "AcrossT2" + RegionStorageToNetworkProximityDefault RegionStorageToNetworkProximity = "Default" + RegionStorageToNetworkProximityTOne RegionStorageToNetworkProximity = "T1" + RegionStorageToNetworkProximityTOneAndAcrossTTwo RegionStorageToNetworkProximity = "T1AndAcrossT2" + RegionStorageToNetworkProximityTOneAndTTwo RegionStorageToNetworkProximity = "T1AndT2" + RegionStorageToNetworkProximityTOneAndTTwoAndAcrossTTwo RegionStorageToNetworkProximity = "T1AndT2AndAcrossT2" + RegionStorageToNetworkProximityTTwo RegionStorageToNetworkProximity = "T2" + RegionStorageToNetworkProximityTTwoAndAcrossTTwo RegionStorageToNetworkProximity = "T2AndAcrossT2" +) + +func PossibleValuesForRegionStorageToNetworkProximity() []string { + return []string{ + string(RegionStorageToNetworkProximityAcrossTTwo), + string(RegionStorageToNetworkProximityDefault), + string(RegionStorageToNetworkProximityTOne), + string(RegionStorageToNetworkProximityTOneAndAcrossTTwo), + string(RegionStorageToNetworkProximityTOneAndTTwo), + string(RegionStorageToNetworkProximityTOneAndTTwoAndAcrossTTwo), + string(RegionStorageToNetworkProximityTTwo), + string(RegionStorageToNetworkProximityTTwoAndAcrossTTwo), + } +} + +func (s *RegionStorageToNetworkProximity) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseRegionStorageToNetworkProximity(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseRegionStorageToNetworkProximity(input string) (*RegionStorageToNetworkProximity, error) { + vals := map[string]RegionStorageToNetworkProximity{ + "acrosst2": RegionStorageToNetworkProximityAcrossTTwo, + "default": RegionStorageToNetworkProximityDefault, + "t1": 
RegionStorageToNetworkProximityTOne, + "t1andacrosst2": RegionStorageToNetworkProximityTOneAndAcrossTTwo, + "t1andt2": RegionStorageToNetworkProximityTOneAndTTwo, + "t1andt2andacrosst2": RegionStorageToNetworkProximityTOneAndTTwoAndAcrossTTwo, + "t2": RegionStorageToNetworkProximityTTwo, + "t2andacrosst2": RegionStorageToNetworkProximityTTwoAndAcrossTTwo, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := RegionStorageToNetworkProximity(input) + return &out, nil +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/id_location.go b/resource-manager/netapp/2025-06-01/netappresource/id_location.go new file mode 100644 index 00000000000..7f5f70c69df --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/id_location.go @@ -0,0 +1,121 @@ +package netappresource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&LocationId{}) +} + +var _ resourceids.ResourceId = &LocationId{} + +// LocationId is a struct representing the Resource ID for a Location +type LocationId struct { + SubscriptionId string + LocationName string +} + +// NewLocationID returns a new LocationId struct +func NewLocationID(subscriptionId string, locationName string) LocationId { + return LocationId{ + SubscriptionId: subscriptionId, + LocationName: locationName, + } +} + +// ParseLocationID parses 'input' into a LocationId +func ParseLocationID(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseLocationIDInsensitively parses 'input' case-insensitively into a LocationId +// note: this method should only be used for API response data and not user input +func ParseLocationIDInsensitively(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *LocationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.LocationName, ok = input.Parsed["locationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "locationName", input) + } + + return nil +} + +// ValidateLocationID checks that 'input' can be parsed as a Location ID +func ValidateLocationID(input interface{}, key string) 
(warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseLocationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Location ID +func (id LocationId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.NetApp/locations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.LocationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Location ID +func (id LocationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticLocations", "locations", "locations"), + resourceids.UserSpecifiedSegment("locationName", "locationName"), + } +} + +// String returns a human-readable description of this Location ID +func (id LocationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location Name: %q", id.LocationName), + } + return fmt.Sprintf("Location (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/id_location_test.go b/resource-manager/netapp/2025-06-01/netappresource/id_location_test.go new file mode 100644 index 00000000000..6e374aa4ecb --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/id_location_test.go @@ -0,0 +1,237 @@ +package netappresource + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &LocationId{} + +func TestNewLocationID(t *testing.T) { + id := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.LocationName != "locationName" { + t.Fatalf("Expected %q but got %q for Segment 'LocationName'", id.LocationName, "locationName") + } +} + +func TestFormatLocationID(t *testing.T) { + actual := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseLocationID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + }, + }, + { + // 
Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestParseLocationIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP/lOcAtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP/lOcAtIoNs/lOcAtIoNnAmE", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "lOcAtIoNnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP/lOcAtIoNs/lOcAtIoNnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", 
v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestSegmentsForLocationId(t *testing.T) { + segments := LocationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("LocationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/id_quotalimit.go b/resource-manager/netapp/2025-06-01/netappresource/id_quotalimit.go new file mode 100644 index 00000000000..b35ac4de378 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/id_quotalimit.go @@ -0,0 +1,130 @@ +package netappresource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&QuotaLimitId{}) +} + +var _ resourceids.ResourceId = &QuotaLimitId{} + +// QuotaLimitId is a struct representing the Resource ID for a Quota Limit +type QuotaLimitId struct { + SubscriptionId string + LocationName string + QuotaLimitName string +} + +// NewQuotaLimitID returns a new QuotaLimitId struct +func NewQuotaLimitID(subscriptionId string, locationName string, quotaLimitName string) QuotaLimitId { + return QuotaLimitId{ + SubscriptionId: subscriptionId, + LocationName: locationName, + QuotaLimitName: quotaLimitName, + } +} + +// ParseQuotaLimitID parses 'input' into a QuotaLimitId +func ParseQuotaLimitID(input string) (*QuotaLimitId, error) { + parser := resourceids.NewParserFromResourceIdType(&QuotaLimitId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := QuotaLimitId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseQuotaLimitIDInsensitively parses 'input' case-insensitively into a QuotaLimitId +// note: this method should only be used for API response data and not user input +func ParseQuotaLimitIDInsensitively(input string) (*QuotaLimitId, error) { + parser := resourceids.NewParserFromResourceIdType(&QuotaLimitId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := QuotaLimitId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *QuotaLimitId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.LocationName, ok = input.Parsed["locationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "locationName", input) + } + + if id.QuotaLimitName, 
ok = input.Parsed["quotaLimitName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "quotaLimitName", input) + } + + return nil +} + +// ValidateQuotaLimitID checks that 'input' can be parsed as a Quota Limit ID +func ValidateQuotaLimitID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseQuotaLimitID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Quota Limit ID +func (id QuotaLimitId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.NetApp/locations/%s/quotaLimits/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.LocationName, id.QuotaLimitName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Quota Limit ID +func (id QuotaLimitId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticLocations", "locations", "locations"), + resourceids.UserSpecifiedSegment("locationName", "locationName"), + resourceids.StaticSegment("staticQuotaLimits", "quotaLimits", "quotaLimits"), + resourceids.UserSpecifiedSegment("quotaLimitName", "quotaLimitName"), + } +} + +// String returns a human-readable description of this Quota Limit ID +func (id QuotaLimitId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location Name: %q", id.LocationName), + fmt.Sprintf("Quota Limit Name: %q", id.QuotaLimitName), + } + return fmt.Sprintf("Quota Limit (%s)", 
strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/id_quotalimit_test.go b/resource-manager/netapp/2025-06-01/netappresource/id_quotalimit_test.go new file mode 100644 index 00000000000..3d154522c0a --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/id_quotalimit_test.go @@ -0,0 +1,282 @@ +package netappresource + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &QuotaLimitId{} + +func TestNewQuotaLimitID(t *testing.T) { + id := NewQuotaLimitID("12345678-1234-9876-4563-123456789012", "locationName", "quotaLimitName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.LocationName != "locationName" { + t.Fatalf("Expected %q but got %q for Segment 'LocationName'", id.LocationName, "locationName") + } + + if id.QuotaLimitName != "quotaLimitName" { + t.Fatalf("Expected %q but got %q for Segment 'QuotaLimitName'", id.QuotaLimitName, "quotaLimitName") + } +} + +func TestFormatQuotaLimitID(t *testing.T) { + actual := NewQuotaLimitID("12345678-1234-9876-4563-123456789012", "locationName", "quotaLimitName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/quotaLimits/quotaLimitName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseQuotaLimitID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *QuotaLimitId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + 
{ + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/quotaLimits", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/quotaLimits/quotaLimitName", + Expected: &QuotaLimitId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + QuotaLimitName: "quotaLimitName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/quotaLimits/quotaLimitName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseQuotaLimitID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + 
+ if actual.QuotaLimitName != v.Expected.QuotaLimitName { + t.Fatalf("Expected %q but got %q for QuotaLimitName", v.Expected.QuotaLimitName, actual.QuotaLimitName) + } + + } +} + +func TestParseQuotaLimitIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *QuotaLimitId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP/lOcAtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP/lOcAtIoNs/lOcAtIoNnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/quotaLimits", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP/lOcAtIoNs/lOcAtIoNnAmE/qUoTaLiMiTs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/quotaLimits/quotaLimitName", + Expected: &QuotaLimitId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + QuotaLimitName: "quotaLimitName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/quotaLimits/quotaLimitName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP/lOcAtIoNs/lOcAtIoNnAmE/qUoTaLiMiTs/qUoTaLiMiTnAmE", + Expected: &QuotaLimitId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "lOcAtIoNnAmE", + QuotaLimitName: "qUoTaLiMiTnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP/lOcAtIoNs/lOcAtIoNnAmE/qUoTaLiMiTs/qUoTaLiMiTnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseQuotaLimitIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId 
!= v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + if actual.QuotaLimitName != v.Expected.QuotaLimitName { + t.Fatalf("Expected %q but got %q for QuotaLimitName", v.Expected.QuotaLimitName, actual.QuotaLimitName) + } + + } +} + +func TestSegmentsForQuotaLimitId(t *testing.T) { + segments := QuotaLimitId{}.Segments() + if len(segments) == 0 { + t.Fatalf("QuotaLimitId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/id_usage.go b/resource-manager/netapp/2025-06-01/netappresource/id_usage.go new file mode 100644 index 00000000000..3681d77d4ef --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/id_usage.go @@ -0,0 +1,130 @@ +package netappresource + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&UsageId{}) +} + +var _ resourceids.ResourceId = &UsageId{} + +// UsageId is a struct representing the Resource ID for a Usage +type UsageId struct { + SubscriptionId string + LocationName string + UsageName string +} + +// NewUsageID returns a new UsageId struct +func NewUsageID(subscriptionId string, locationName string, usageName string) UsageId { + return UsageId{ + SubscriptionId: subscriptionId, + LocationName: locationName, + UsageName: usageName, + } +} + +// ParseUsageID parses 'input' into a UsageId +func ParseUsageID(input string) (*UsageId, error) { + parser := resourceids.NewParserFromResourceIdType(&UsageId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := UsageId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseUsageIDInsensitively parses 'input' case-insensitively into a UsageId +// note: this method should only be used for API response data and not user input +func ParseUsageIDInsensitively(input string) (*UsageId, error) { + parser := resourceids.NewParserFromResourceIdType(&UsageId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := UsageId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *UsageId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.LocationName, ok = input.Parsed["locationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "locationName", input) + } + + if id.UsageName, ok = input.Parsed["usageName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "usageName", input) + } + + return nil +} + +// 
ValidateUsageID checks that 'input' can be parsed as a Usage ID +func ValidateUsageID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseUsageID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Usage ID +func (id UsageId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.NetApp/locations/%s/usages/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.LocationName, id.UsageName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Usage ID +func (id UsageId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticLocations", "locations", "locations"), + resourceids.UserSpecifiedSegment("locationName", "locationName"), + resourceids.StaticSegment("staticUsages", "usages", "usages"), + resourceids.UserSpecifiedSegment("usageName", "usageName"), + } +} + +// String returns a human-readable description of this Usage ID +func (id UsageId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location Name: %q", id.LocationName), + fmt.Sprintf("Usage Name: %q", id.UsageName), + } + return fmt.Sprintf("Usage (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/id_usage_test.go b/resource-manager/netapp/2025-06-01/netappresource/id_usage_test.go new file mode 100644 index 00000000000..0f8f3835ea6 --- /dev/null +++ 
b/resource-manager/netapp/2025-06-01/netappresource/id_usage_test.go @@ -0,0 +1,282 @@ +package netappresource + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &UsageId{} + +func TestNewUsageID(t *testing.T) { + id := NewUsageID("12345678-1234-9876-4563-123456789012", "locationName", "usageName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.LocationName != "locationName" { + t.Fatalf("Expected %q but got %q for Segment 'LocationName'", id.LocationName, "locationName") + } + + if id.UsageName != "usageName" { + t.Fatalf("Expected %q but got %q for Segment 'UsageName'", id.UsageName, "usageName") + } +} + +func TestFormatUsageID(t *testing.T) { + actual := NewUsageID("12345678-1234-9876-4563-123456789012", "locationName", "usageName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/usages/usageName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseUsageID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *UsageId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp", + Error: 
true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/usages", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/usages/usageName", + Expected: &UsageId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + UsageName: "usageName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/usages/usageName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseUsageID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + if actual.UsageName != v.Expected.UsageName { + t.Fatalf("Expected %q but got %q for UsageName", v.Expected.UsageName, actual.UsageName) + } + + } +} + +func TestParseUsageIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *UsageId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", 
+ Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP/lOcAtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP/lOcAtIoNs/lOcAtIoNnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/usages", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP/lOcAtIoNs/lOcAtIoNnAmE/uSaGeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/usages/usageName", + Expected: &UsageId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + UsageName: "usageName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.NetApp/locations/locationName/usages/usageName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP/lOcAtIoNs/lOcAtIoNnAmE/uSaGeS/uSaGeNaMe", + Expected: &UsageId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "lOcAtIoNnAmE", + UsageName: "uSaGeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtApP/lOcAtIoNs/lOcAtIoNnAmE/uSaGeS/uSaGeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseUsageIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + if actual.UsageName != v.Expected.UsageName { + t.Fatalf("Expected %q but got %q for UsageName", v.Expected.UsageName, actual.UsageName) + } + 
+ } +} + +func TestSegmentsForUsageId(t *testing.T) { + segments := UsageId{}.Segments() + if len(segments) == 0 { + t.Fatalf("UsageId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/method_checkfilepathavailability.go b/resource-manager/netapp/2025-06-01/netappresource/method_checkfilepathavailability.go new file mode 100644 index 00000000000..fcd3daa5d29 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/method_checkfilepathavailability.go @@ -0,0 +1,58 @@ +package netappresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CheckFilePathAvailabilityOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *CheckAvailabilityResponse +} + +// CheckFilePathAvailability ... 
+func (c NetAppResourceClient) CheckFilePathAvailability(ctx context.Context, id LocationId, input FilePathAvailabilityRequest) (result CheckFilePathAvailabilityOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/checkFilePathAvailability", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model CheckAvailabilityResponse + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/method_checknameavailability.go b/resource-manager/netapp/2025-06-01/netappresource/method_checknameavailability.go new file mode 100644 index 00000000000..788eb6035e5 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/method_checknameavailability.go @@ -0,0 +1,58 @@ +package netappresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CheckNameAvailabilityOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *CheckAvailabilityResponse +} + +// CheckNameAvailability ... 
+func (c NetAppResourceClient) CheckNameAvailability(ctx context.Context, id LocationId, input ResourceNameAvailabilityRequest) (result CheckNameAvailabilityOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/checkNameAvailability", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model CheckAvailabilityResponse + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/method_checkquotaavailability.go b/resource-manager/netapp/2025-06-01/netappresource/method_checkquotaavailability.go new file mode 100644 index 00000000000..75b1541b588 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/method_checkquotaavailability.go @@ -0,0 +1,58 @@ +package netappresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CheckQuotaAvailabilityOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *CheckAvailabilityResponse +} + +// CheckQuotaAvailability ... 
+func (c NetAppResourceClient) CheckQuotaAvailability(ctx context.Context, id LocationId, input QuotaAvailabilityRequest) (result CheckQuotaAvailabilityOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/checkQuotaAvailability", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model CheckAvailabilityResponse + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/method_querynetworksiblingset.go b/resource-manager/netapp/2025-06-01/netappresource/method_querynetworksiblingset.go new file mode 100644 index 00000000000..1777bea9e98 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/method_querynetworksiblingset.go @@ -0,0 +1,58 @@ +package netappresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QueryNetworkSiblingSetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *NetworkSiblingSet +} + +// QueryNetworkSiblingSet ... 
+func (c NetAppResourceClient) QueryNetworkSiblingSet(ctx context.Context, id LocationId, input QueryNetworkSiblingSetRequest) (result QueryNetworkSiblingSetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/queryNetworkSiblingSet", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model NetworkSiblingSet + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/method_queryregioninfo.go b/resource-manager/netapp/2025-06-01/netappresource/method_queryregioninfo.go new file mode 100644 index 00000000000..dc45f121299 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/method_queryregioninfo.go @@ -0,0 +1,54 @@ +package netappresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QueryRegionInfoOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *RegionInfo +} + +// QueryRegionInfo ... 
+func (c NetAppResourceClient) QueryRegionInfo(ctx context.Context, id LocationId) (result QueryRegionInfoOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/regionInfo", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model RegionInfo + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/method_quotalimitsget.go b/resource-manager/netapp/2025-06-01/netappresource/method_quotalimitsget.go new file mode 100644 index 00000000000..cd1492d82a8 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/method_quotalimitsget.go @@ -0,0 +1,53 @@ +package netappresource + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QuotaLimitsGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *SubscriptionQuotaItem +} + +// QuotaLimitsGet ... 
+func (c NetAppResourceClient) QuotaLimitsGet(ctx context.Context, id QuotaLimitId) (result QuotaLimitsGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model SubscriptionQuotaItem + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/method_quotalimitslist.go b/resource-manager/netapp/2025-06-01/netappresource/method_quotalimitslist.go new file mode 100644 index 00000000000..4fe85e2ac3c --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/method_quotalimitslist.go @@ -0,0 +1,105 @@ +package netappresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QuotaLimitsListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]SubscriptionQuotaItem +} + +type QuotaLimitsListCompleteResult struct { + LatestHttpResponse *http.Response + Items []SubscriptionQuotaItem +} + +type QuotaLimitsListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *QuotaLimitsListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// QuotaLimitsList ... 
+func (c NetAppResourceClient) QuotaLimitsList(ctx context.Context, id LocationId) (result QuotaLimitsListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &QuotaLimitsListCustomPager{}, + Path: fmt.Sprintf("%s/quotaLimits", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]SubscriptionQuotaItem `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// QuotaLimitsListComplete retrieves all the results into a single object +func (c NetAppResourceClient) QuotaLimitsListComplete(ctx context.Context, id LocationId) (QuotaLimitsListCompleteResult, error) { + return c.QuotaLimitsListCompleteMatchingPredicate(ctx, id, SubscriptionQuotaItemOperationPredicate{}) +} + +// QuotaLimitsListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c NetAppResourceClient) QuotaLimitsListCompleteMatchingPredicate(ctx context.Context, id LocationId, predicate SubscriptionQuotaItemOperationPredicate) (result QuotaLimitsListCompleteResult, err error) { + items := make([]SubscriptionQuotaItem, 0) + + resp, err := c.QuotaLimitsList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = QuotaLimitsListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git 
a/resource-manager/netapp/2025-06-01/netappresource/method_regioninfosget.go b/resource-manager/netapp/2025-06-01/netappresource/method_regioninfosget.go new file mode 100644 index 00000000000..89e4991022d --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/method_regioninfosget.go @@ -0,0 +1,54 @@ +package netappresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RegionInfosGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *RegionInfoResource +} + +// RegionInfosGet ... +func (c NetAppResourceClient) RegionInfosGet(ctx context.Context, id LocationId) (result RegionInfosGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/regionInfos/default", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model RegionInfoResource + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/method_regioninfoslist.go b/resource-manager/netapp/2025-06-01/netappresource/method_regioninfoslist.go new file mode 100644 index 00000000000..87038bafe2a --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/method_regioninfoslist.go @@ -0,0 +1,105 @@ +package netappresource + +import ( + "context" + "fmt" + "net/http" + + 
"github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RegionInfosListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]RegionInfoResource +} + +type RegionInfosListCompleteResult struct { + LatestHttpResponse *http.Response + Items []RegionInfoResource +} + +type RegionInfosListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *RegionInfosListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// RegionInfosList ... +func (c NetAppResourceClient) RegionInfosList(ctx context.Context, id LocationId) (result RegionInfosListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &RegionInfosListCustomPager{}, + Path: fmt.Sprintf("%s/regionInfos", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]RegionInfoResource `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// RegionInfosListComplete retrieves all the results into a single object +func (c NetAppResourceClient) RegionInfosListComplete(ctx context.Context, id LocationId) (RegionInfosListCompleteResult, error) { + return c.RegionInfosListCompleteMatchingPredicate(ctx, id, RegionInfoResourceOperationPredicate{}) +} + +// RegionInfosListCompleteMatchingPredicate retrieves all the results and then 
applies the predicate +func (c NetAppResourceClient) RegionInfosListCompleteMatchingPredicate(ctx context.Context, id LocationId, predicate RegionInfoResourceOperationPredicate) (result RegionInfosListCompleteResult, err error) { + items := make([]RegionInfoResource, 0) + + resp, err := c.RegionInfosList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = RegionInfosListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/method_updatenetworksiblingset.go b/resource-manager/netapp/2025-06-01/netappresource/method_updatenetworksiblingset.go new file mode 100644 index 00000000000..47e6dd6cba8 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/method_updatenetworksiblingset.go @@ -0,0 +1,75 @@ +package netappresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpdateNetworkSiblingSetOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *NetworkSiblingSet +} + +// UpdateNetworkSiblingSet ... 
+func (c NetAppResourceClient) UpdateNetworkSiblingSet(ctx context.Context, id LocationId, input UpdateNetworkSiblingSetRequest) (result UpdateNetworkSiblingSetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/updateNetworkSiblingSet", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UpdateNetworkSiblingSetThenPoll performs UpdateNetworkSiblingSet then polls until it's completed +func (c NetAppResourceClient) UpdateNetworkSiblingSetThenPoll(ctx context.Context, id LocationId, input UpdateNetworkSiblingSetRequest) error { + result, err := c.UpdateNetworkSiblingSet(ctx, id, input) + if err != nil { + return fmt.Errorf("performing UpdateNetworkSiblingSet: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after UpdateNetworkSiblingSet: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/method_usagesget.go b/resource-manager/netapp/2025-06-01/netappresource/method_usagesget.go new file mode 100644 index 00000000000..966143d9e2b --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/method_usagesget.go @@ -0,0 +1,53 @@ +package netappresource + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UsagesGetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *UsageResult +} + +// UsagesGet ... +func (c NetAppResourceClient) UsagesGet(ctx context.Context, id UsageId) (result UsagesGetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model UsageResult + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/method_usageslist.go b/resource-manager/netapp/2025-06-01/netappresource/method_usageslist.go new file mode 100644 index 00000000000..2a4cd41497e --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/method_usageslist.go @@ -0,0 +1,105 @@ +package netappresource + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type UsagesListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]UsageResult +} + +type UsagesListCompleteResult struct { + LatestHttpResponse *http.Response + Items []UsageResult +} + +type UsagesListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *UsagesListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// UsagesList ... +func (c NetAppResourceClient) UsagesList(ctx context.Context, id LocationId) (result UsagesListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &UsagesListCustomPager{}, + Path: fmt.Sprintf("%s/usages", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]UsageResult `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// UsagesListComplete retrieves all the results into a single object +func (c NetAppResourceClient) UsagesListComplete(ctx context.Context, id LocationId) (UsagesListCompleteResult, error) { + return c.UsagesListCompleteMatchingPredicate(ctx, id, UsageResultOperationPredicate{}) +} + +// UsagesListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c NetAppResourceClient) UsagesListCompleteMatchingPredicate(ctx context.Context, id LocationId, predicate UsageResultOperationPredicate) (result UsagesListCompleteResult, err error) { + items := make([]UsageResult, 0) + + resp, err := c.UsagesList(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = 
fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = UsagesListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_checkavailabilityresponse.go b/resource-manager/netapp/2025-06-01/netappresource/model_checkavailabilityresponse.go new file mode 100644 index 00000000000..6423d9de076 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_checkavailabilityresponse.go @@ -0,0 +1,10 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CheckAvailabilityResponse struct { + IsAvailable *bool `json:"isAvailable,omitempty"` + Message *string `json:"message,omitempty"` + Reason *InAvailabilityReasonType `json:"reason,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_filepathavailabilityrequest.go b/resource-manager/netapp/2025-06-01/netappresource/model_filepathavailabilityrequest.go new file mode 100644 index 00000000000..41575ebe014 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_filepathavailabilityrequest.go @@ -0,0 +1,10 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type FilePathAvailabilityRequest struct { + AvailabilityZone *string `json:"availabilityZone,omitempty"` + Name string `json:"name"` + SubnetId string `json:"subnetId"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_networksiblingset.go b/resource-manager/netapp/2025-06-01/netappresource/model_networksiblingset.go new file mode 100644 index 00000000000..6873f71c613 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_networksiblingset.go @@ -0,0 +1,13 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type NetworkSiblingSet struct { + NetworkFeatures *NetworkFeatures `json:"networkFeatures,omitempty"` + NetworkSiblingSetId *string `json:"networkSiblingSetId,omitempty"` + NetworkSiblingSetStateId *string `json:"networkSiblingSetStateId,omitempty"` + NicInfoList *[]NicInfo `json:"nicInfoList,omitempty"` + ProvisioningState *NetworkSiblingSetProvisioningState `json:"provisioningState,omitempty"` + SubnetId *string `json:"subnetId,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_nicinfo.go b/resource-manager/netapp/2025-06-01/netappresource/model_nicinfo.go new file mode 100644 index 00000000000..5ee50025a75 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_nicinfo.go @@ -0,0 +1,9 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type NicInfo struct { + IPAddress *string `json:"ipAddress,omitempty"` + VolumeResourceIds *[]string `json:"volumeResourceIds,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_querynetworksiblingsetrequest.go b/resource-manager/netapp/2025-06-01/netappresource/model_querynetworksiblingsetrequest.go new file mode 100644 index 00000000000..35c102048c3 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_querynetworksiblingsetrequest.go @@ -0,0 +1,9 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QueryNetworkSiblingSetRequest struct { + NetworkSiblingSetId string `json:"networkSiblingSetId"` + SubnetId string `json:"subnetId"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_quotaavailabilityrequest.go b/resource-manager/netapp/2025-06-01/netappresource/model_quotaavailabilityrequest.go new file mode 100644 index 00000000000..d8df9ac4cef --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_quotaavailabilityrequest.go @@ -0,0 +1,10 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type QuotaAvailabilityRequest struct { + Name string `json:"name"` + ResourceGroup string `json:"resourceGroup"` + Type CheckQuotaNameResourceTypes `json:"type"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_regioninfo.go b/resource-manager/netapp/2025-06-01/netappresource/model_regioninfo.go new file mode 100644 index 00000000000..63c92a72c98 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_regioninfo.go @@ -0,0 +1,9 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type RegionInfo struct { + AvailabilityZoneMappings *[]RegionInfoAvailabilityZoneMappingsInlined `json:"availabilityZoneMappings,omitempty"` + StorageToNetworkProximity *RegionStorageToNetworkProximity `json:"storageToNetworkProximity,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_regioninfoavailabilityzonemappingsinlined.go b/resource-manager/netapp/2025-06-01/netappresource/model_regioninfoavailabilityzonemappingsinlined.go new file mode 100644 index 00000000000..36fce7489dc --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_regioninfoavailabilityzonemappingsinlined.go @@ -0,0 +1,9 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RegionInfoAvailabilityZoneMappingsInlined struct { + AvailabilityZone *string `json:"availabilityZone,omitempty"` + IsAvailable *bool `json:"isAvailable,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_regioninforesource.go b/resource-manager/netapp/2025-06-01/netappresource/model_regioninforesource.go new file mode 100644 index 00000000000..9e9a94461d9 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_regioninforesource.go @@ -0,0 +1,16 @@ +package netappresource + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type RegionInfoResource struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *RegionInfo `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_resourcenameavailabilityrequest.go b/resource-manager/netapp/2025-06-01/netappresource/model_resourcenameavailabilityrequest.go new file mode 100644 index 00000000000..0bfbaf4c0e9 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_resourcenameavailabilityrequest.go @@ -0,0 +1,10 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ResourceNameAvailabilityRequest struct { + Name string `json:"name"` + ResourceGroup string `json:"resourceGroup"` + Type CheckNameResourceTypes `json:"type"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_subscriptionquotaitem.go b/resource-manager/netapp/2025-06-01/netappresource/model_subscriptionquotaitem.go new file mode 100644 index 00000000000..88a5cbd39c1 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_subscriptionquotaitem.go @@ -0,0 +1,16 @@ +package netappresource + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SubscriptionQuotaItem struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *SubscriptionQuotaItemProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_subscriptionquotaitemproperties.go b/resource-manager/netapp/2025-06-01/netappresource/model_subscriptionquotaitemproperties.go new file mode 100644 index 00000000000..01cdd538c05 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_subscriptionquotaitemproperties.go @@ -0,0 +1,9 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SubscriptionQuotaItemProperties struct { + Current *int64 `json:"current,omitempty"` + Default *int64 `json:"default,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_updatenetworksiblingsetrequest.go b/resource-manager/netapp/2025-06-01/netappresource/model_updatenetworksiblingsetrequest.go new file mode 100644 index 00000000000..ea780814344 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_updatenetworksiblingsetrequest.go @@ -0,0 +1,11 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type UpdateNetworkSiblingSetRequest struct { + NetworkFeatures NetworkFeatures `json:"networkFeatures"` + NetworkSiblingSetId string `json:"networkSiblingSetId"` + NetworkSiblingSetStateId string `json:"networkSiblingSetStateId"` + SubnetId string `json:"subnetId"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_usagename.go b/resource-manager/netapp/2025-06-01/netappresource/model_usagename.go new file mode 100644 index 00000000000..06a9cf3c6c2 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_usagename.go @@ -0,0 +1,9 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UsageName struct { + LocalizedValue *string `json:"localizedValue,omitempty"` + Value *string `json:"value,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_usageproperties.go b/resource-manager/netapp/2025-06-01/netappresource/model_usageproperties.go new file mode 100644 index 00000000000..db12798cc50 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_usageproperties.go @@ -0,0 +1,10 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UsageProperties struct { + CurrentValue *int64 `json:"currentValue,omitempty"` + Limit *int64 `json:"limit,omitempty"` + Unit *string `json:"unit,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/model_usageresult.go b/resource-manager/netapp/2025-06-01/netappresource/model_usageresult.go new file mode 100644 index 00000000000..90c761ada06 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/model_usageresult.go @@ -0,0 +1,10 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UsageResult struct { + Id *string `json:"id,omitempty"` + Name *UsageName `json:"name,omitempty"` + Properties *UsageProperties `json:"properties,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/netappresource/predicates.go b/resource-manager/netapp/2025-06-01/netappresource/predicates.go new file mode 100644 index 00000000000..731a5874c9e --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/predicates.go @@ -0,0 +1,63 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RegionInfoResourceOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p RegionInfoResourceOperationPredicate) Matches(input RegionInfoResource) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type SubscriptionQuotaItemOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p SubscriptionQuotaItemOperationPredicate) Matches(input SubscriptionQuotaItem) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} + +type UsageResultOperationPredicate struct { + Id *string +} + +func (p UsageResultOperationPredicate) Matches(input UsageResult) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + return true +} diff --git 
a/resource-manager/netapp/2025-06-01/netappresource/version.go b/resource-manager/netapp/2025-06-01/netappresource/version.go new file mode 100644 index 00000000000..fd62c46fcba --- /dev/null +++ b/resource-manager/netapp/2025-06-01/netappresource/version.go @@ -0,0 +1,10 @@ +package netappresource + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/netappresource/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/poolchange/README.md b/resource-manager/netapp/2025-06-01/poolchange/README.md new file mode 100644 index 00000000000..e62aaedfd72 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/poolchange/README.md @@ -0,0 +1,37 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/poolchange` Documentation + +The `poolchange` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/poolchange" +``` + + +### Client Initialization + +```go +client := poolchange.NewPoolChangeClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `PoolChangeClient.VolumesPoolChange` + +```go +ctx := context.TODO() +id := poolchange.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +payload := poolchange.PoolChangeRequest{ + // ... 
+} + + +if err := client.VolumesPoolChangeThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/poolchange/client.go b/resource-manager/netapp/2025-06-01/poolchange/client.go new file mode 100644 index 00000000000..a83eadc3fe1 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/poolchange/client.go @@ -0,0 +1,26 @@ +package poolchange + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PoolChangeClient struct { + Client *resourcemanager.Client +} + +func NewPoolChangeClientWithBaseURI(sdkApi sdkEnv.Api) (*PoolChangeClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "poolchange", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating PoolChangeClient: %+v", err) + } + + return &PoolChangeClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/poolchange/id_volume.go b/resource-manager/netapp/2025-06-01/poolchange/id_volume.go new file mode 100644 index 00000000000..db883b5bdcb --- /dev/null +++ b/resource-manager/netapp/2025-06-01/poolchange/id_volume.go @@ -0,0 +1,148 @@ +package poolchange + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + 
return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + 
resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/poolchange/id_volume_test.go b/resource-manager/netapp/2025-06-01/poolchange/id_volume_test.go new file mode 100644 index 00000000000..85f31b29d70 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/poolchange/id_volume_test.go @@ -0,0 +1,372 @@ +package poolchange + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/poolchange/method_volumespoolchange.go b/resource-manager/netapp/2025-06-01/poolchange/method_volumespoolchange.go new file mode 100644 index 00000000000..7a0ca0e6450 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/poolchange/method_volumespoolchange.go @@ -0,0 +1,74 @@ +package poolchange + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type VolumesPoolChangeOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesPoolChange ... +func (c PoolChangeClient) VolumesPoolChange(ctx context.Context, id VolumeId, input PoolChangeRequest) (result VolumesPoolChangeOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/poolChange", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesPoolChangeThenPoll performs VolumesPoolChange then polls until it's completed +func (c PoolChangeClient) VolumesPoolChangeThenPoll(ctx context.Context, id VolumeId, input PoolChangeRequest) error { + result, err := c.VolumesPoolChange(ctx, id, input) + if err != nil { + return fmt.Errorf("performing VolumesPoolChange: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesPoolChange: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/poolchange/model_poolchangerequest.go b/resource-manager/netapp/2025-06-01/poolchange/model_poolchangerequest.go new file mode 100644 index 00000000000..19402a5b699 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/poolchange/model_poolchangerequest.go @@ -0,0 +1,8 @@ +package poolchange + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type PoolChangeRequest struct { + NewPoolResourceId string `json:"newPoolResourceId"` +} diff --git a/resource-manager/netapp/2025-06-01/poolchange/version.go b/resource-manager/netapp/2025-06-01/poolchange/version.go new file mode 100644 index 00000000000..f451b1b0647 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/poolchange/version.go @@ -0,0 +1,10 @@ +package poolchange + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/poolchange/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/resetcifspassword/client.go b/resource-manager/netapp/2025-06-01/resetcifspassword/client.go new file mode 100644 index 00000000000..ba2cbb22bb0 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/resetcifspassword/client.go @@ -0,0 +1,26 @@ +package resetcifspassword + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ResetCifsPasswordClient struct { + Client *resourcemanager.Client +} + +func NewResetCifsPasswordClientWithBaseURI(sdkApi sdkEnv.Api) (*ResetCifsPasswordClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "resetcifspassword", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating ResetCifsPasswordClient: %+v", err) + } + + return &ResetCifsPasswordClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/resetcifspassword/id_volume.go b/resource-manager/netapp/2025-06-01/resetcifspassword/id_volume.go new file mode 100644 index 00000000000..c5a2844bb7b --- /dev/null +++ b/resource-manager/netapp/2025-06-01/resetcifspassword/id_volume.go @@ -0,0 +1,148 @@ +package resetcifspassword + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + 
return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + 
resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/resetcifspassword/id_volume_test.go b/resource-manager/netapp/2025-06-01/resetcifspassword/id_volume_test.go new file mode 100644 index 00000000000..0b3b2133026 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/resetcifspassword/id_volume_test.go @@ -0,0 +1,372 @@ +package resetcifspassword + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/resetcifspassword/method_volumesresetcifspassword.go b/resource-manager/netapp/2025-06-01/resetcifspassword/method_volumesresetcifspassword.go new file mode 100644 index 00000000000..d8bdd81cd6f --- /dev/null +++ b/resource-manager/netapp/2025-06-01/resetcifspassword/method_volumesresetcifspassword.go @@ -0,0 +1,69 @@ +package resetcifspassword + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
// VolumesResetCifsPasswordOperationResponse is the outcome of starting the
// long-running resetCifsPassword operation: the raw HTTP response, any OData
// payload, and a Poller that tracks the operation to completion.
type VolumesResetCifsPasswordOperationResponse struct {
	Poller       pollers.Poller
	HttpResponse *http.Response
	OData        *odata.OData
}

// VolumesResetCifsPassword starts the long-running operation that resets the
// CIFS password on the given volume (POST {volume-id}/resetCifsPassword).
// The service is expected to answer 202 Accepted; callers poll result.Poller
// (or use VolumesResetCifsPasswordThenPoll) to wait for completion.
func (c ResetCifsPasswordClient) VolumesResetCifsPassword(ctx context.Context, id VolumeId) (result VolumesResetCifsPasswordOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusAccepted,
		},
		HttpMethod: http.MethodPost,
		Path:       fmt.Sprintf("%s/resetCifsPassword", id.ID()),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	// The response is captured even on error so callers can inspect the
	// status code / OData error details of a failed request.
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	// Build a poller from the 202 response (Location/Azure-AsyncOperation
	// headers) so the caller can wait for the operation to finish.
	result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client)
	if err != nil {
		return
	}

	return
}

// VolumesResetCifsPasswordThenPoll performs VolumesResetCifsPassword then polls until it's completed
func (c ResetCifsPasswordClient) VolumesResetCifsPasswordThenPoll(ctx context.Context, id VolumeId) error {
	result, err := c.VolumesResetCifsPassword(ctx, id)
	if err != nil {
		return fmt.Errorf("performing VolumesResetCifsPassword: %+v", err)
	}

	if err := result.Poller.PollUntilDone(ctx); err != nil {
		return fmt.Errorf("polling after VolumesResetCifsPassword: %+v", err)
	}

	return nil
}
// defaultApiVersion is the Azure API version this package targets.
const defaultApiVersion = "2025-06-01"

// userAgent returns the User-Agent fragment identifying this SDK package
// and the API version it was generated for.
func userAgent() string {
	return "hashicorp/go-azure-sdk/resetcifspassword/" + defaultApiVersion
}
"github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RestoreClient struct { + Client *resourcemanager.Client +} + +func NewRestoreClientWithBaseURI(sdkApi sdkEnv.Api) (*RestoreClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "restore", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating RestoreClient: %+v", err) + } + + return &RestoreClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/restore/constants.go b/resource-manager/netapp/2025-06-01/restore/constants.go new file mode 100644 index 00000000000..c734520711f --- /dev/null +++ b/resource-manager/netapp/2025-06-01/restore/constants.go @@ -0,0 +1,101 @@ +package restore + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// MirrorState describes the mirroring state of a restore relationship.
type MirrorState string

const (
	MirrorStateBroken        MirrorState = "Broken"
	MirrorStateMirrored      MirrorState = "Mirrored"
	MirrorStateUninitialized MirrorState = "Uninitialized"
)

// PossibleValuesForMirrorState lists every documented MirrorState value.
func PossibleValuesForMirrorState() []string {
	return []string{
		string(MirrorStateBroken),
		string(MirrorStateMirrored),
		string(MirrorStateUninitialized),
	}
}

// UnmarshalJSON decodes a JSON string into a MirrorState, normalising the
// casing of known values.
func (s *MirrorState) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseMirrorState(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseMirrorState maps input (case-insensitively) onto a known MirrorState;
// unrecognised values are passed through unchanged on a best-effort basis,
// so this never returns an error in practice.
func parseMirrorState(input string) (*MirrorState, error) {
	var known MirrorState
	switch strings.ToLower(input) {
	case "broken":
		known = MirrorStateBroken
	case "mirrored":
		known = MirrorStateMirrored
	case "uninitialized":
		known = MirrorStateUninitialized
	default:
		// otherwise presume it's an undefined value and best-effort it
		passthrough := MirrorState(input)
		return &passthrough, nil
	}
	return &known, nil
}

// RelationshipStatus describes the transfer status of a restore relationship.
type RelationshipStatus string

const (
	RelationshipStatusFailed       RelationshipStatus = "Failed"
	RelationshipStatusIdle         RelationshipStatus = "Idle"
	RelationshipStatusTransferring RelationshipStatus = "Transferring"
	RelationshipStatusUnknown      RelationshipStatus = "Unknown"
)

// PossibleValuesForRelationshipStatus lists every documented RelationshipStatus value.
func PossibleValuesForRelationshipStatus() []string {
	return []string{
		string(RelationshipStatusFailed),
		string(RelationshipStatusIdle),
		string(RelationshipStatusTransferring),
		string(RelationshipStatusUnknown),
	}
}

// UnmarshalJSON decodes a JSON string into a RelationshipStatus, normalising
// the casing of known values.
func (s *RelationshipStatus) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseRelationshipStatus(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

// parseRelationshipStatus maps input (case-insensitively) onto a known
// RelationshipStatus; unrecognised values are passed through unchanged on a
// best-effort basis, so this never returns an error in practice.
func parseRelationshipStatus(input string) (*RelationshipStatus, error) {
	var known RelationshipStatus
	switch strings.ToLower(input) {
	case "failed":
		known = RelationshipStatusFailed
	case "idle":
		known = RelationshipStatusIdle
	case "transferring":
		known = RelationshipStatusTransferring
	case "unknown":
		known = RelationshipStatusUnknown
	default:
		// otherwise presume it's an undefined value and best-effort it
		passthrough := RelationshipStatus(input)
		return &passthrough, nil
	}
	return &known, nil
}
func init() {
	// Register the ID type so the shared recaser can normalise the casing of
	// Volume IDs that appear in API responses.
	recaser.RegisterResourceId(&VolumeId{})
}

// Compile-time check that VolumeId satisfies the ResourceId contract.
var _ resourceids.ResourceId = &VolumeId{}

// VolumeId is a struct representing the Resource ID for a Volume
type VolumeId struct {
	SubscriptionId    string
	ResourceGroupName string
	NetAppAccountName string
	CapacityPoolName  string
	VolumeName        string
}

// NewVolumeID returns a new VolumeId struct
func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId {
	return VolumeId{
		SubscriptionId:    subscriptionId,
		ResourceGroupName: resourceGroupName,
		NetAppAccountName: netAppAccountName,
		CapacityPoolName:  capacityPoolName,
		VolumeName:        volumeName,
	}
}

// ParseVolumeID parses 'input' into a VolumeId
func ParseVolumeID(input string) (*VolumeId, error) {
	parser := resourceids.NewParserFromResourceIdType(&VolumeId{})
	// second argument false => case-sensitive matching of static segments
	parsed, err := parser.Parse(input, false)
	if err != nil {
		return nil, fmt.Errorf("parsing %q: %+v", input, err)
	}

	id := VolumeId{}
	if err = id.FromParseResult(*parsed); err != nil {
		return nil, err
	}

	return &id, nil
}

// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId
// note: this method should only be used for API response data and not user input
func ParseVolumeIDInsensitively(input string) (*VolumeId, error) {
	parser := resourceids.NewParserFromResourceIdType(&VolumeId{})
	// second argument true => case-insensitive matching of static segments
	parsed, err := parser.Parse(input, true)
	if err != nil {
		return nil, fmt.Errorf("parsing %q: %+v", input, err)
	}

	id := VolumeId{}
	if err = id.FromParseResult(*parsed); err != nil {
		return nil, err
	}

	return &id, nil
}

// FromParseResult populates the VolumeId fields from a ParseResult, returning
// an error naming the first expected segment that is missing.
func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error {
	var ok bool

	if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input)
	}

	if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input)
	}

	if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input)
	}

	if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input)
	}

	if id.VolumeName, ok = input.Parsed["volumeName"]; !ok {
		return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input)
	}

	return nil
}

// ValidateVolumeID checks that 'input' can be parsed as a Volume ID
func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) {
	v, ok := input.(string)
	if !ok {
		errors = append(errors, fmt.Errorf("expected %q to be a string", key))
		return
	}

	if _, err := ParseVolumeID(v); err != nil {
		errors = append(errors, err)
	}

	return
}

// ID returns the formatted Volume ID
func (id VolumeId) ID() string {
	fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s"
	return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName)
}

// Segments returns a slice of Resource ID Segments which comprise this Volume ID
// (ordered to match the path produced by ID(); the parser relies on this order).
func (id VolumeId) Segments() []resourceids.Segment {
	return []resourceids.Segment{
		resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"),
		resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"),
		resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"),
		resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"),
		resourceids.StaticSegment("staticProviders", "providers", "providers"),
		resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"),
		resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"),
		resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"),
		resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"),
		resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"),
		resourceids.StaticSegment("staticVolumes", "volumes", "volumes"),
		resourceids.UserSpecifiedSegment("volumeName", "volumeName"),
	}
}

// String returns a human-readable description of this Volume ID
func (id VolumeId) String() string {
	components := []string{
		fmt.Sprintf("Subscription: %q", id.SubscriptionId),
		fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName),
		fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName),
		fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName),
		fmt.Sprintf("Volume Name: %q", id.VolumeName),
	}
	return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n"))
}
+ +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/restore/method_backupsgetvolumelatestrestorestatus.go b/resource-manager/netapp/2025-06-01/restore/method_backupsgetvolumelatestrestorestatus.go new file mode 100644 index 00000000000..42b453d7f1a --- /dev/null +++ b/resource-manager/netapp/2025-06-01/restore/method_backupsgetvolumelatestrestorestatus.go @@ -0,0 +1,54 @@ +package restore + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// BackupsGetVolumeLatestRestoreStatusOperationResponse is the outcome of the
// GET: the raw HTTP response, any OData payload, and the decoded RestoreStatus.
type BackupsGetVolumeLatestRestoreStatusOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *RestoreStatus
}

// BackupsGetVolumeLatestRestoreStatus retrieves the latest restore status of
// the given volume (GET {volume-id}/latestRestoreStatus/current) and decodes
// the 200 response body into result.Model.
func (c RestoreClient) BackupsGetVolumeLatestRestoreStatus(ctx context.Context, id VolumeId) (result BackupsGetVolumeLatestRestoreStatusOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusOK,
		},
		HttpMethod: http.MethodGet,
		Path:       fmt.Sprintf("%s/latestRestoreStatus/current", id.ID()),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	// The response is captured even on error so callers can inspect the
	// status code / OData error details of a failed request.
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	var model RestoreStatus
	result.Model = &model
	if err = resp.Unmarshal(result.Model); err != nil {
		return
	}

	return
}
+ +type RestoreStatus struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Healthy *bool `json:"healthy,omitempty"` + MirrorState *MirrorState `json:"mirrorState,omitempty"` + RelationshipStatus *RelationshipStatus `json:"relationshipStatus,omitempty"` + TotalTransferBytes *int64 `json:"totalTransferBytes,omitempty"` + UnhealthyReason *string `json:"unhealthyReason,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/restore/version.go b/resource-manager/netapp/2025-06-01/restore/version.go new file mode 100644 index 00000000000..67c5934acb9 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/restore/version.go @@ -0,0 +1,10 @@ +package restore + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/restore/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicy/README.md b/resource-manager/netapp/2025-06-01/snapshotpolicy/README.md new file mode 100644 index 00000000000..121be98400e --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicy/README.md @@ -0,0 +1,102 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/snapshotpolicy` Documentation + +The `snapshotpolicy` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/snapshotpolicy" +``` + + +### Client Initialization + +```go +client := snapshotpolicy.NewSnapshotPolicyClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `SnapshotPolicyClient.SnapshotPoliciesCreate` + +```go +ctx := context.TODO() +id := snapshotpolicy.NewSnapshotPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "snapshotPolicyName") + +payload := snapshotpolicy.SnapshotPolicy{ + // ... +} + + +read, err := client.SnapshotPoliciesCreate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `SnapshotPolicyClient.SnapshotPoliciesDelete` + +```go +ctx := context.TODO() +id := snapshotpolicy.NewSnapshotPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "snapshotPolicyName") + +if err := client.SnapshotPoliciesDeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `SnapshotPolicyClient.SnapshotPoliciesGet` + +```go +ctx := context.TODO() +id := snapshotpolicy.NewSnapshotPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "snapshotPolicyName") + +read, err := client.SnapshotPoliciesGet(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `SnapshotPolicyClient.SnapshotPoliciesList` + +```go +ctx := context.TODO() +id := snapshotpolicy.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +read, err := client.SnapshotPoliciesList(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something 
with the model/response object +} +``` + + +### Example Usage: `SnapshotPolicyClient.SnapshotPoliciesUpdate` + +```go +ctx := context.TODO() +id := snapshotpolicy.NewSnapshotPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "snapshotPolicyName") + +payload := snapshotpolicy.SnapshotPolicyPatch{ + // ... +} + + +if err := client.SnapshotPoliciesUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicy/client.go b/resource-manager/netapp/2025-06-01/snapshotpolicy/client.go new file mode 100644 index 00000000000..e92eedba749 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicy/client.go @@ -0,0 +1,26 @@ +package snapshotpolicy + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SnapshotPolicyClient struct { + Client *resourcemanager.Client +} + +func NewSnapshotPolicyClientWithBaseURI(sdkApi sdkEnv.Api) (*SnapshotPolicyClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "snapshotpolicy", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating SnapshotPolicyClient: %+v", err) + } + + return &SnapshotPolicyClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicy/id_netappaccount.go b/resource-manager/netapp/2025-06-01/snapshotpolicy/id_netappaccount.go new file mode 100644 index 00000000000..14deafbb222 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicy/id_netappaccount.go @@ -0,0 +1,130 @@ +package snapshotpolicy + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&NetAppAccountId{}) +} + +var _ resourceids.ResourceId = &NetAppAccountId{} + +// NetAppAccountId is a struct representing the Resource ID for a Net App Account +type NetAppAccountId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string +} + +// NewNetAppAccountID returns a new NetAppAccountId struct +func NewNetAppAccountID(subscriptionId string, resourceGroupName string, netAppAccountName string) NetAppAccountId { + return NetAppAccountId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + } +} + +// ParseNetAppAccountID parses 'input' into a NetAppAccountId +func ParseNetAppAccountID(input string) (*NetAppAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseNetAppAccountIDInsensitively parses 'input' case-insensitively into a NetAppAccountId +// note: this method should only be used for API response data and not user input +func ParseNetAppAccountIDInsensitively(input string) (*NetAppAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *NetAppAccountId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; 
!ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + return nil +} + +// ValidateNetAppAccountID checks that 'input' can be parsed as a Net App Account ID +func ValidateNetAppAccountID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseNetAppAccountID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Net App Account ID +func (id NetAppAccountId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Net App Account ID +func (id NetAppAccountId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + } +} + +// String returns a human-readable description of this Net App Account ID +func (id NetAppAccountId) String() string { + components 
:= []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + } + return fmt.Sprintf("Net App Account (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicy/id_netappaccount_test.go b/resource-manager/netapp/2025-06-01/snapshotpolicy/id_netappaccount_test.go new file mode 100644 index 00000000000..37ac2f7a327 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicy/id_netappaccount_test.go @@ -0,0 +1,282 @@ +package snapshotpolicy + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &NetAppAccountId{} + +func TestNewNetAppAccountID(t *testing.T) { + id := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } +} + +func TestFormatNetAppAccountID(t *testing.T) { + actual := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName" + if 
actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseNetAppAccountID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountID(v.Input) + if err 
!= nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestParseNetAppAccountIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Expected: &NetAppAccountId{ + SubscriptionId: 
"12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestSegmentsForNetAppAccountId(t *testing.T) { + segments := NetAppAccountId{}.Segments() + if len(segments) == 0 { + t.Fatalf("NetAppAccountId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicy/id_snapshotpolicy.go b/resource-manager/netapp/2025-06-01/snapshotpolicy/id_snapshotpolicy.go new file mode 100644 index 00000000000..0f5ca81ebc0 --- /dev/null +++ 
b/resource-manager/netapp/2025-06-01/snapshotpolicy/id_snapshotpolicy.go @@ -0,0 +1,139 @@ +package snapshotpolicy + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&SnapshotPolicyId{}) +} + +var _ resourceids.ResourceId = &SnapshotPolicyId{} + +// SnapshotPolicyId is a struct representing the Resource ID for a Snapshot Policy +type SnapshotPolicyId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + SnapshotPolicyName string +} + +// NewSnapshotPolicyID returns a new SnapshotPolicyId struct +func NewSnapshotPolicyID(subscriptionId string, resourceGroupName string, netAppAccountName string, snapshotPolicyName string) SnapshotPolicyId { + return SnapshotPolicyId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + SnapshotPolicyName: snapshotPolicyName, + } +} + +// ParseSnapshotPolicyID parses 'input' into a SnapshotPolicyId +func ParseSnapshotPolicyID(input string) (*SnapshotPolicyId, error) { + parser := resourceids.NewParserFromResourceIdType(&SnapshotPolicyId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SnapshotPolicyId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseSnapshotPolicyIDInsensitively parses 'input' case-insensitively into a SnapshotPolicyId +// note: this method should only be used for API response data and not user input +func ParseSnapshotPolicyIDInsensitively(input string) (*SnapshotPolicyId, error) { + parser := resourceids.NewParserFromResourceIdType(&SnapshotPolicyId{}) 
+ parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SnapshotPolicyId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *SnapshotPolicyId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.SnapshotPolicyName, ok = input.Parsed["snapshotPolicyName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "snapshotPolicyName", input) + } + + return nil +} + +// ValidateSnapshotPolicyID checks that 'input' can be parsed as a Snapshot Policy ID +func ValidateSnapshotPolicyID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseSnapshotPolicyID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Snapshot Policy ID +func (id SnapshotPolicyId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/snapshotPolicies/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.SnapshotPolicyName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Snapshot Policy ID +func (id SnapshotPolicyId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", 
"subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticSnapshotPolicies", "snapshotPolicies", "snapshotPolicies"), + resourceids.UserSpecifiedSegment("snapshotPolicyName", "snapshotPolicyName"), + } +} + +// String returns a human-readable description of this Snapshot Policy ID +func (id SnapshotPolicyId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Snapshot Policy Name: %q", id.SnapshotPolicyName), + } + return fmt.Sprintf("Snapshot Policy (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicy/id_snapshotpolicy_test.go b/resource-manager/netapp/2025-06-01/snapshotpolicy/id_snapshotpolicy_test.go new file mode 100644 index 00000000000..47db3863892 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicy/id_snapshotpolicy_test.go @@ -0,0 +1,327 @@ +package snapshotpolicy + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &SnapshotPolicyId{} + +func TestNewSnapshotPolicyID(t *testing.T) { + id := NewSnapshotPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "snapshotPolicyName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.SnapshotPolicyName != "snapshotPolicyName" { + t.Fatalf("Expected %q but got %q for Segment 'SnapshotPolicyName'", id.SnapshotPolicyName, "snapshotPolicyName") + } +} + +func TestFormatSnapshotPolicyID(t *testing.T) { + actual := NewSnapshotPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "snapshotPolicyName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies/snapshotPolicyName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseSnapshotPolicyID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SnapshotPolicyId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies/snapshotPolicyName", + Expected: &SnapshotPolicyId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + SnapshotPolicyName: "snapshotPolicyName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies/snapshotPolicyName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSnapshotPolicyID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an 
error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.SnapshotPolicyName != v.Expected.SnapshotPolicyName { + t.Fatalf("Expected %q but got %q for SnapshotPolicyName", v.Expected.SnapshotPolicyName, actual.SnapshotPolicyName) + } + + } +} + +func TestParseSnapshotPolicyIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SnapshotPolicyId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is 
insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/sNaPsHoTpOlIcIeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies/snapshotPolicyName", + Expected: &SnapshotPolicyId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + SnapshotPolicyName: "snapshotPolicyName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies/snapshotPolicyName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/sNaPsHoTpOlIcIeS/sNaPsHoTpOlIcYnAmE", + Expected: &SnapshotPolicyId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + SnapshotPolicyName: "sNaPsHoTpOlIcYnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/sNaPsHoTpOlIcIeS/sNaPsHoTpOlIcYnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSnapshotPolicyIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + 
t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.SnapshotPolicyName != v.Expected.SnapshotPolicyName { + t.Fatalf("Expected %q but got %q for SnapshotPolicyName", v.Expected.SnapshotPolicyName, actual.SnapshotPolicyName) + } + + } +} + +func TestSegmentsForSnapshotPolicyId(t *testing.T) { + segments := SnapshotPolicyId{}.Segments() + if len(segments) == 0 { + t.Fatalf("SnapshotPolicyId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicy/method_snapshotpoliciescreate.go b/resource-manager/netapp/2025-06-01/snapshotpolicy/method_snapshotpoliciescreate.go new file mode 100644 index 00000000000..8210e2fad73 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicy/method_snapshotpoliciescreate.go @@ -0,0 +1,58 @@ +package snapshotpolicy + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// SnapshotPoliciesCreateOperationResponse describes the response returned by
// SnapshotPoliciesCreate: the raw HTTP response, any OData annotations, and
// the decoded SnapshotPolicy (nil until a successful response is decoded).
type SnapshotPoliciesCreateOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *SnapshotPolicy
}

// SnapshotPoliciesCreate issues a PUT to create (or replace) the Snapshot
// Policy identified by id with the supplied payload. Both 200 OK and 201
// Created are treated as success; the decoded body is returned in
// result.Model.
func (c SnapshotPolicyClient) SnapshotPoliciesCreate(ctx context.Context, id SnapshotPolicyId, input SnapshotPolicy) (result SnapshotPoliciesCreateOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusCreated,
			http.StatusOK,
		},
		HttpMethod: http.MethodPut,
		Path:       id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	// Serialize the payload into the request body.
	if err = req.Marshal(input); err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		// Surface the raw response (and OData annotations) even when the
		// request itself returned an error.
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	var model SnapshotPolicy
	result.Model = &model
	if err = resp.Unmarshal(result.Model); err != nil {
		return
	}

	return
}

// SnapshotPoliciesDeleteOperationResponse describes the response returned by
// SnapshotPoliciesDelete; Poller tracks the long-running deletion.
type SnapshotPoliciesDeleteOperationResponse struct {
	Poller       pollers.Poller
	HttpResponse *http.Response
	OData        *odata.OData
}
// SnapshotPoliciesDelete removes the Snapshot Policy identified by id. This
// is a long-running operation: callers must poll result.Poller to completion
// (or use SnapshotPoliciesDeleteThenPoll). 200, 202 and 204 are all accepted
// as success statuses.
func (c SnapshotPolicyClient) SnapshotPoliciesDelete(ctx context.Context, id SnapshotPolicyId) (result SnapshotPoliciesDeleteOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusAccepted,
			http.StatusNoContent,
			http.StatusOK,
		},
		HttpMethod: http.MethodDelete,
		Path:       id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		// Surface the raw response (and OData annotations) even when the
		// request itself returned an error.
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	// Build a poller which tracks the asynchronous deletion to completion.
	result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client)
	if err != nil {
		return
	}

	return
}

// SnapshotPoliciesDeleteThenPoll performs SnapshotPoliciesDelete then polls
// the returned Poller until the long-running operation has completed.
func (c SnapshotPolicyClient) SnapshotPoliciesDeleteThenPoll(ctx context.Context, id SnapshotPolicyId) error {
	result, err := c.SnapshotPoliciesDelete(ctx, id)
	if err != nil {
		return fmt.Errorf("performing SnapshotPoliciesDelete: %+v", err)
	}

	if err := result.Poller.PollUntilDone(ctx); err != nil {
		return fmt.Errorf("polling after SnapshotPoliciesDelete: %+v", err)
	}

	return nil
}
// SnapshotPoliciesGetOperationResponse describes the response returned by
// SnapshotPoliciesGet: the raw HTTP response, any OData annotations, and the
// decoded SnapshotPolicy (nil until a successful response is decoded).
type SnapshotPoliciesGetOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *SnapshotPolicy
}

// SnapshotPoliciesGet retrieves the Snapshot Policy identified by id; the
// decoded body is returned in result.Model.
func (c SnapshotPolicyClient) SnapshotPoliciesGet(ctx context.Context, id SnapshotPolicyId) (result SnapshotPoliciesGetOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusOK,
		},
		HttpMethod: http.MethodGet,
		Path:       id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		// Surface the raw response (and OData annotations) even when the
		// request itself returned an error.
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	var model SnapshotPolicy
	result.Model = &model
	if err = resp.Unmarshal(result.Model); err != nil {
		return
	}

	return
}

// SnapshotPoliciesListOperationResponse describes the response returned by
// SnapshotPoliciesList; Model holds the decoded list wrapper.
type SnapshotPoliciesListOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *SnapshotPoliciesList
}
// SnapshotPoliciesList retrieves all Snapshot Policies under the NetApp
// Account identified by id; the decoded list wrapper is returned in
// result.Model.
func (c SnapshotPolicyClient) SnapshotPoliciesList(ctx context.Context, id NetAppAccountId) (result SnapshotPoliciesListOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusOK,
		},
		HttpMethod: http.MethodGet,
		// List is scoped to the parent account's child collection, so the
		// path is the account ID plus the child resource type.
		Path: fmt.Sprintf("%s/snapshotPolicies", id.ID()),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		// Surface the raw response (and OData annotations) even when the
		// request itself returned an error.
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	var model SnapshotPoliciesList
	result.Model = &model
	if err = resp.Unmarshal(result.Model); err != nil {
		return
	}

	return
}

// SnapshotPoliciesUpdateOperationResponse describes the response returned by
// SnapshotPoliciesUpdate; Poller tracks the long-running update and Model
// holds the decoded SnapshotPolicy when one is returned.
type SnapshotPoliciesUpdateOperationResponse struct {
	Poller       pollers.Poller
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *SnapshotPolicy
}
// SnapshotPoliciesUpdate issues a PATCH to update the Snapshot Policy
// identified by id with the supplied patch payload. This is a long-running
// operation (202 Accepted is a success status): callers must poll
// result.Poller to completion, or use SnapshotPoliciesUpdateThenPoll.
func (c SnapshotPolicyClient) SnapshotPoliciesUpdate(ctx context.Context, id SnapshotPolicyId, input SnapshotPolicyPatch) (result SnapshotPoliciesUpdateOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusAccepted,
			http.StatusOK,
		},
		HttpMethod: http.MethodPatch,
		Path:       id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	// Serialize the patch payload into the request body.
	if err = req.Marshal(input); err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	if resp != nil {
		// Surface the raw response (and OData annotations) even when the
		// request itself returned an error.
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	// Build a poller which tracks the asynchronous update to completion.
	result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client)
	if err != nil {
		return
	}

	return
}

// SnapshotPoliciesUpdateThenPoll performs SnapshotPoliciesUpdate then polls
// the returned Poller until the long-running operation has completed.
func (c SnapshotPolicyClient) SnapshotPoliciesUpdateThenPoll(ctx context.Context, id SnapshotPolicyId, input SnapshotPolicyPatch) error {
	result, err := c.SnapshotPoliciesUpdate(ctx, id, input)
	if err != nil {
		return fmt.Errorf("performing SnapshotPoliciesUpdate: %+v", err)
	}

	if err := result.Poller.PollUntilDone(ctx); err != nil {
		return fmt.Errorf("polling after SnapshotPoliciesUpdate: %+v", err)
	}

	return nil
}
// DailySchedule models the "dailySchedule" block of a Snapshot Policy's
// properties. All fields are optional pointers with omitempty so that unset
// values are omitted from the JSON payload. Hour/Minute are presumably the
// local time-of-day at which snapshots are taken — confirm against the
// service API reference.
type DailySchedule struct {
	Hour            *int64 `json:"hour,omitempty"`
	Minute          *int64 `json:"minute,omitempty"`
	SnapshotsToKeep *int64 `json:"snapshotsToKeep,omitempty"`
	UsedBytes       *int64 `json:"usedBytes,omitempty"`
}

// HourlySchedule models the "hourlySchedule" block of a Snapshot Policy's
// properties. All fields are optional pointers with omitempty so that unset
// values are omitted from the JSON payload.
type HourlySchedule struct {
	Minute          *int64 `json:"minute,omitempty"`
	SnapshotsToKeep *int64 `json:"snapshotsToKeep,omitempty"`
	UsedBytes       *int64 `json:"usedBytes,omitempty"`
}
// MonthlySchedule models the "monthlySchedule" block of a Snapshot Policy's
// properties. All fields are optional pointers with omitempty so that unset
// values are omitted from the JSON payload. DaysOfMonth is a string —
// presumably a delimited list of day numbers; confirm against the service
// API reference.
type MonthlySchedule struct {
	DaysOfMonth     *string `json:"daysOfMonth,omitempty"`
	Hour            *int64  `json:"hour,omitempty"`
	Minute          *int64  `json:"minute,omitempty"`
	SnapshotsToKeep *int64  `json:"snapshotsToKeep,omitempty"`
	UsedBytes       *int64  `json:"usedBytes,omitempty"`
}

// SnapshotPoliciesList is the wrapper object returned by the List operation;
// Value holds the individual Snapshot Policies.
type SnapshotPoliciesList struct {
	Value *[]SnapshotPolicy `json:"value,omitempty"`
}
// SnapshotPolicy is the top-level Snapshot Policy resource model. Location
// and Properties are non-pointer and lack omitempty, so they are always
// serialized and required when creating the resource; the remaining fields
// are read-only/optional ARM envelope fields.
type SnapshotPolicy struct {
	Etag       *string                  `json:"etag,omitempty"`
	Id         *string                  `json:"id,omitempty"`
	Location   string                   `json:"location"`
	Name       *string                  `json:"name,omitempty"`
	Properties SnapshotPolicyProperties `json:"properties"`
	SystemData *systemdata.SystemData   `json:"systemData,omitempty"`
	Tags       *map[string]string       `json:"tags,omitempty"`
	Type       *string                  `json:"type,omitempty"`
}

// SnapshotPolicyPatch is the payload for the (PATCH) Update operation. Every
// field — including Location and Properties — is an optional pointer with
// omitempty, so only the fields being changed are sent.
type SnapshotPolicyPatch struct {
	Id         *string                   `json:"id,omitempty"`
	Location   *string                   `json:"location,omitempty"`
	Name       *string                   `json:"name,omitempty"`
	Properties *SnapshotPolicyProperties `json:"properties,omitempty"`
	Tags       *map[string]string        `json:"tags,omitempty"`
	Type       *string                   `json:"type,omitempty"`
}
// SnapshotPolicyProperties holds the schedule configuration for a Snapshot
// Policy: one optional block per cadence (hourly/daily/weekly/monthly), a
// policy-wide Enabled flag, and the server-populated ProvisioningState.
type SnapshotPolicyProperties struct {
	DailySchedule     *DailySchedule   `json:"dailySchedule,omitempty"`
	Enabled           *bool            `json:"enabled,omitempty"`
	HourlySchedule    *HourlySchedule  `json:"hourlySchedule,omitempty"`
	MonthlySchedule   *MonthlySchedule `json:"monthlySchedule,omitempty"`
	ProvisioningState *string          `json:"provisioningState,omitempty"`
	WeeklySchedule    *WeeklySchedule  `json:"weeklySchedule,omitempty"`
}

// WeeklySchedule models the "weeklySchedule" block of a Snapshot Policy's
// properties. All fields are optional pointers with omitempty so that unset
// values are omitted from the JSON payload. Day is a string — presumably a
// weekday name (or list of them); confirm against the service API reference.
type WeeklySchedule struct {
	Day             *string `json:"day,omitempty"`
	Hour            *int64  `json:"hour,omitempty"`
	Minute          *int64  `json:"minute,omitempty"`
	SnapshotsToKeep *int64  `json:"snapshotsToKeep,omitempty"`
	UsedBytes       *int64  `json:"usedBytes,omitempty"`
}
// defaultApiVersion is the Azure API version this package targets.
const defaultApiVersion = "2025-06-01"

// userAgent returns the User-Agent value sent with requests from this
// package, derived from the package name and the targeted API version.
func userAgent() string {
	// Build from defaultApiVersion rather than repeating the version literal,
	// so the two can never drift apart.
	return "hashicorp/go-azure-sdk/snapshotpolicy/" + defaultApiVersion
}
// SnapshotPolicyListVolumesClient is the client for the Snapshot Policy
// "list volumes" operations (netapp API version 2025-06-01).
type SnapshotPolicyListVolumesClient struct {
	Client *resourcemanager.Client
}

// NewSnapshotPolicyListVolumesClientWithBaseURI builds a
// SnapshotPolicyListVolumesClient for the given environment/API, pinned to
// this package's defaultApiVersion. The caller must still configure the
// returned client's Authorizer before use.
func NewSnapshotPolicyListVolumesClientWithBaseURI(sdkApi sdkEnv.Api) (*SnapshotPolicyListVolumesClient, error) {
	client, err := resourcemanager.NewClient(sdkApi, "snapshotpolicylistvolumes", defaultApiVersion)
	if err != nil {
		return nil, fmt.Errorf("instantiating SnapshotPolicyListVolumesClient: %+v", err)
	}

	return &SnapshotPolicyListVolumesClient{
		Client: client,
	}, nil
}
// AcceptGrowCapacityPoolForShortTermCloneSplit is a string-backed enum with
// the known values Accepted and Declined. Unknown values are tolerated on
// decode (best-effort) rather than rejected.
type AcceptGrowCapacityPoolForShortTermCloneSplit string

const (
	AcceptGrowCapacityPoolForShortTermCloneSplitAccepted AcceptGrowCapacityPoolForShortTermCloneSplit = "Accepted"
	AcceptGrowCapacityPoolForShortTermCloneSplitDeclined AcceptGrowCapacityPoolForShortTermCloneSplit = "Declined"
)

// PossibleValuesForAcceptGrowCapacityPoolForShortTermCloneSplit returns the
// known values for this enum as strings.
func PossibleValuesForAcceptGrowCapacityPoolForShortTermCloneSplit() []string {
	return []string{
		string(AcceptGrowCapacityPoolForShortTermCloneSplitAccepted),
		string(AcceptGrowCapacityPoolForShortTermCloneSplitDeclined),
	}
}

// UnmarshalJSON decodes a JSON string into this enum, normalising casing via
// the package-level parse helper.
func (s *AcceptGrowCapacityPoolForShortTermCloneSplit) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseAcceptGrowCapacityPoolForShortTermCloneSplit(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseAcceptGrowCapacityPoolForShortTermCloneSplit maps input onto a known
// value case-insensitively; unrecognised input is passed through unchanged.
func parseAcceptGrowCapacityPoolForShortTermCloneSplit(input string) (*AcceptGrowCapacityPoolForShortTermCloneSplit, error) {
	vals := map[string]AcceptGrowCapacityPoolForShortTermCloneSplit{
		"accepted": AcceptGrowCapacityPoolForShortTermCloneSplitAccepted,
		"declined": AcceptGrowCapacityPoolForShortTermCloneSplitDeclined,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := AcceptGrowCapacityPoolForShortTermCloneSplit(input)
	return &out, nil
}

// AvsDataStore is a string-backed enum with the known values Disabled and
// Enabled. Unknown values are tolerated on decode (best-effort).
type AvsDataStore string

const (
	AvsDataStoreDisabled AvsDataStore = "Disabled"
	AvsDataStoreEnabled  AvsDataStore = "Enabled"
)

// PossibleValuesForAvsDataStore returns the known values for this enum as
// strings.
func PossibleValuesForAvsDataStore() []string {
	return []string{
		string(AvsDataStoreDisabled),
		string(AvsDataStoreEnabled),
	}
}

// UnmarshalJSON decodes a JSON string into this enum, normalising casing via
// the package-level parse helper.
func (s *AvsDataStore) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseAvsDataStore(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseAvsDataStore maps input onto a known value case-insensitively;
// unrecognised input is passed through unchanged.
func parseAvsDataStore(input string) (*AvsDataStore, error) {
	vals := map[string]AvsDataStore{
		"disabled": AvsDataStoreDisabled,
		"enabled":  AvsDataStoreEnabled,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := AvsDataStore(input)
	return &out, nil
}

// ChownMode is a string-backed enum with the known values Restricted and
// Unrestricted. Unknown values are tolerated on decode (best-effort).
type ChownMode string

const (
	ChownModeRestricted   ChownMode = "Restricted"
	ChownModeUnrestricted ChownMode = "Unrestricted"
)

// PossibleValuesForChownMode returns the known values for this enum as
// strings.
func PossibleValuesForChownMode() []string {
	return []string{
		string(ChownModeRestricted),
		string(ChownModeUnrestricted),
	}
}

// UnmarshalJSON decodes a JSON string into this enum, normalising casing via
// the package-level parse helper.
func (s *ChownMode) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseChownMode(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseChownMode maps input onto a known value case-insensitively;
// unrecognised input is passed through unchanged.
func parseChownMode(input string) (*ChownMode, error) {
	vals := map[string]ChownMode{
		"restricted":   ChownModeRestricted,
		"unrestricted": ChownModeUnrestricted,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := ChownMode(input)
	return &out, nil
}

// CoolAccessRetrievalPolicy is a string-backed enum with the known values
// Default, Never and OnRead. Unknown values are tolerated on decode
// (best-effort).
type CoolAccessRetrievalPolicy string

const (
	CoolAccessRetrievalPolicyDefault CoolAccessRetrievalPolicy = "Default"
	CoolAccessRetrievalPolicyNever   CoolAccessRetrievalPolicy = "Never"
	CoolAccessRetrievalPolicyOnRead  CoolAccessRetrievalPolicy = "OnRead"
)

// PossibleValuesForCoolAccessRetrievalPolicy returns the known values for
// this enum as strings.
func PossibleValuesForCoolAccessRetrievalPolicy() []string {
	return []string{
		string(CoolAccessRetrievalPolicyDefault),
		string(CoolAccessRetrievalPolicyNever),
		string(CoolAccessRetrievalPolicyOnRead),
	}
}

// UnmarshalJSON decodes a JSON string into this enum, normalising casing via
// the package-level parse helper.
func (s *CoolAccessRetrievalPolicy) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseCoolAccessRetrievalPolicy(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseCoolAccessRetrievalPolicy maps input onto a known value
// case-insensitively; unrecognised input is passed through unchanged.
func parseCoolAccessRetrievalPolicy(input string) (*CoolAccessRetrievalPolicy, error) {
	vals := map[string]CoolAccessRetrievalPolicy{
		"default": CoolAccessRetrievalPolicyDefault,
		"never":   CoolAccessRetrievalPolicyNever,
		"onread":  CoolAccessRetrievalPolicyOnRead,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := CoolAccessRetrievalPolicy(input)
	return &out, nil
}
// CoolAccessTieringPolicy is a string-backed enum with the known values Auto
// and SnapshotOnly. Unknown values are tolerated on decode (best-effort).
type CoolAccessTieringPolicy string

const (
	CoolAccessTieringPolicyAuto         CoolAccessTieringPolicy = "Auto"
	CoolAccessTieringPolicySnapshotOnly CoolAccessTieringPolicy = "SnapshotOnly"
)

// PossibleValuesForCoolAccessTieringPolicy returns the known values for this
// enum as strings.
func PossibleValuesForCoolAccessTieringPolicy() []string {
	return []string{
		string(CoolAccessTieringPolicyAuto),
		string(CoolAccessTieringPolicySnapshotOnly),
	}
}

// UnmarshalJSON decodes a JSON string into this enum, normalising casing via
// the package-level parse helper.
func (s *CoolAccessTieringPolicy) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseCoolAccessTieringPolicy(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseCoolAccessTieringPolicy maps input onto a known value
// case-insensitively; unrecognised input is passed through unchanged.
func parseCoolAccessTieringPolicy(input string) (*CoolAccessTieringPolicy, error) {
	vals := map[string]CoolAccessTieringPolicy{
		"auto":         CoolAccessTieringPolicyAuto,
		"snapshotonly": CoolAccessTieringPolicySnapshotOnly,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := CoolAccessTieringPolicy(input)
	return &out, nil
}

// EnableSubvolumes is a string-backed enum with the known values Disabled
// and Enabled. Unknown values are tolerated on decode (best-effort).
type EnableSubvolumes string

const (
	EnableSubvolumesDisabled EnableSubvolumes = "Disabled"
	EnableSubvolumesEnabled  EnableSubvolumes = "Enabled"
)

// PossibleValuesForEnableSubvolumes returns the known values for this enum
// as strings.
func PossibleValuesForEnableSubvolumes() []string {
	return []string{
		string(EnableSubvolumesDisabled),
		string(EnableSubvolumesEnabled),
	}
}

// UnmarshalJSON decodes a JSON string into this enum, normalising casing via
// the package-level parse helper.
func (s *EnableSubvolumes) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseEnableSubvolumes(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseEnableSubvolumes maps input onto a known value case-insensitively;
// unrecognised input is passed through unchanged.
func parseEnableSubvolumes(input string) (*EnableSubvolumes, error) {
	vals := map[string]EnableSubvolumes{
		"disabled": EnableSubvolumesDisabled,
		"enabled":  EnableSubvolumesEnabled,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := EnableSubvolumes(input)
	return &out, nil
}

// EncryptionKeySource is a string-backed enum with the known values
// "Microsoft.KeyVault" and "Microsoft.NetApp" (the "Point" in the constant
// names encodes the literal "." in the wire value). Unknown values are
// tolerated on decode (best-effort).
type EncryptionKeySource string

const (
	EncryptionKeySourceMicrosoftPointKeyVault EncryptionKeySource = "Microsoft.KeyVault"
	EncryptionKeySourceMicrosoftPointNetApp   EncryptionKeySource = "Microsoft.NetApp"
)

// PossibleValuesForEncryptionKeySource returns the known values for this
// enum as strings.
func PossibleValuesForEncryptionKeySource() []string {
	return []string{
		string(EncryptionKeySourceMicrosoftPointKeyVault),
		string(EncryptionKeySourceMicrosoftPointNetApp),
	}
}

// UnmarshalJSON decodes a JSON string into this enum, normalising casing via
// the package-level parse helper.
func (s *EncryptionKeySource) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseEncryptionKeySource(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseEncryptionKeySource maps input onto a known value case-insensitively;
// unrecognised input is passed through unchanged.
func parseEncryptionKeySource(input string) (*EncryptionKeySource, error) {
	vals := map[string]EncryptionKeySource{
		"microsoft.keyvault": EncryptionKeySourceMicrosoftPointKeyVault,
		"microsoft.netapp":   EncryptionKeySourceMicrosoftPointNetApp,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := EncryptionKeySource(input)
	return &out, nil
}
// EndpointType is a string-backed enum with the known (lowercase wire)
// values "dst" and "src". Unknown values are tolerated on decode
// (best-effort).
type EndpointType string

const (
	EndpointTypeDst EndpointType = "dst"
	EndpointTypeSrc EndpointType = "src"
)

// PossibleValuesForEndpointType returns the known values for this enum as
// strings.
func PossibleValuesForEndpointType() []string {
	return []string{
		string(EndpointTypeDst),
		string(EndpointTypeSrc),
	}
}

// UnmarshalJSON decodes a JSON string into this enum, normalising casing via
// the package-level parse helper.
func (s *EndpointType) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseEndpointType(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseEndpointType maps input onto a known value case-insensitively;
// unrecognised input is passed through unchanged.
func parseEndpointType(input string) (*EndpointType, error) {
	vals := map[string]EndpointType{
		"dst": EndpointTypeDst,
		"src": EndpointTypeSrc,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := EndpointType(input)
	return &out, nil
}

// FileAccessLogs is a string-backed enum with the known values Disabled and
// Enabled. Unknown values are tolerated on decode (best-effort).
type FileAccessLogs string

const (
	FileAccessLogsDisabled FileAccessLogs = "Disabled"
	FileAccessLogsEnabled  FileAccessLogs = "Enabled"
)

// PossibleValuesForFileAccessLogs returns the known values for this enum as
// strings.
func PossibleValuesForFileAccessLogs() []string {
	return []string{
		string(FileAccessLogsDisabled),
		string(FileAccessLogsEnabled),
	}
}

// UnmarshalJSON decodes a JSON string into this enum, normalising casing via
// the package-level parse helper.
func (s *FileAccessLogs) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseFileAccessLogs(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}

// parseFileAccessLogs maps input onto a known value case-insensitively;
// unrecognised input is passed through unchanged.
func parseFileAccessLogs(input string) (*FileAccessLogs, error) {
	vals := map[string]FileAccessLogs{
		"disabled": FileAccessLogsDisabled,
		"enabled":  FileAccessLogsEnabled,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := FileAccessLogs(input)
	return &out, nil
}

// NetworkFeatures is a string-backed enum with the known values Basic,
// Basic_Standard, Standard and Standard_Basic. Unknown values are tolerated
// on decode (best-effort).
type NetworkFeatures string

const (
	NetworkFeaturesBasic         NetworkFeatures = "Basic"
	NetworkFeaturesBasicStandard NetworkFeatures = "Basic_Standard"
	NetworkFeaturesStandard      NetworkFeatures = "Standard"
	NetworkFeaturesStandardBasic NetworkFeatures = "Standard_Basic"
)

// PossibleValuesForNetworkFeatures returns the known values for this enum as
// strings.
func PossibleValuesForNetworkFeatures() []string {
	return []string{
		string(NetworkFeaturesBasic),
		string(NetworkFeaturesBasicStandard),
		string(NetworkFeaturesStandard),
		string(NetworkFeaturesStandardBasic),
	}
}

// parseNetworkFeatures maps input onto a known value case-insensitively;
// unrecognised input is passed through unchanged.
func parseNetworkFeatures(input string) (*NetworkFeatures, error) {
	vals := map[string]NetworkFeatures{
		"basic":          NetworkFeaturesBasic,
		"basic_standard": NetworkFeaturesBasicStandard,
		"standard":       NetworkFeaturesStandard,
		"standard_basic": NetworkFeaturesStandardBasic,
	}
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v, nil
	}

	// otherwise presume it's an undefined value and best-effort it
	out := NetworkFeatures(input)
	return &out, nil
}

// UnmarshalJSON decodes a JSON string into NetworkFeatures, normalising
// casing via parseNetworkFeatures.
func (s *NetworkFeatures) UnmarshalJSON(bytes []byte) error {
	var decoded string
	if err := json.Unmarshal(bytes, &decoded); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	out, err := parseNetworkFeatures(decoded)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", decoded, err)
	}
	*s = *out
	return nil
}
[]byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseReplicationSchedule(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicationSchedule(input string) (*ReplicationSchedule, error) { + vals := map[string]ReplicationSchedule{ + "daily": ReplicationScheduleDaily, + "hourly": ReplicationScheduleHourly, + "_10minutely": ReplicationScheduleOneZerominutely, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicationSchedule(input) + return &out, nil +} + +type ReplicationType string + +const ( + ReplicationTypeCrossRegionReplication ReplicationType = "CrossRegionReplication" + ReplicationTypeCrossZoneReplication ReplicationType = "CrossZoneReplication" +) + +func PossibleValuesForReplicationType() []string { + return []string{ + string(ReplicationTypeCrossRegionReplication), + string(ReplicationTypeCrossZoneReplication), + } +} + +func (s *ReplicationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseReplicationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicationType(input string) (*ReplicationType, error) { + vals := map[string]ReplicationType{ + "crossregionreplication": ReplicationTypeCrossRegionReplication, + "crosszonereplication": ReplicationTypeCrossZoneReplication, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicationType(input) + return &out, nil +} + +type SecurityStyle string + +const ( + SecurityStyleNtfs SecurityStyle = "ntfs" + 
SecurityStyleUnix SecurityStyle = "unix" +) + +func PossibleValuesForSecurityStyle() []string { + return []string{ + string(SecurityStyleNtfs), + string(SecurityStyleUnix), + } +} + +func (s *SecurityStyle) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSecurityStyle(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSecurityStyle(input string) (*SecurityStyle, error) { + vals := map[string]SecurityStyle{ + "ntfs": SecurityStyleNtfs, + "unix": SecurityStyleUnix, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SecurityStyle(input) + return &out, nil +} + +type ServiceLevel string + +const ( + ServiceLevelFlexible ServiceLevel = "Flexible" + ServiceLevelPremium ServiceLevel = "Premium" + ServiceLevelStandard ServiceLevel = "Standard" + ServiceLevelStandardZRS ServiceLevel = "StandardZRS" + ServiceLevelUltra ServiceLevel = "Ultra" +) + +func PossibleValuesForServiceLevel() []string { + return []string{ + string(ServiceLevelFlexible), + string(ServiceLevelPremium), + string(ServiceLevelStandard), + string(ServiceLevelStandardZRS), + string(ServiceLevelUltra), + } +} + +func (s *ServiceLevel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServiceLevel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServiceLevel(input string) (*ServiceLevel, error) { + vals := map[string]ServiceLevel{ + "flexible": ServiceLevelFlexible, + "premium": ServiceLevelPremium, + "standard": ServiceLevelStandard, + "standardzrs": ServiceLevelStandardZRS, + "ultra": 
ServiceLevelUltra, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServiceLevel(input) + return &out, nil +} + +type SmbAccessBasedEnumeration string + +const ( + SmbAccessBasedEnumerationDisabled SmbAccessBasedEnumeration = "Disabled" + SmbAccessBasedEnumerationEnabled SmbAccessBasedEnumeration = "Enabled" +) + +func PossibleValuesForSmbAccessBasedEnumeration() []string { + return []string{ + string(SmbAccessBasedEnumerationDisabled), + string(SmbAccessBasedEnumerationEnabled), + } +} + +func (s *SmbAccessBasedEnumeration) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSmbAccessBasedEnumeration(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSmbAccessBasedEnumeration(input string) (*SmbAccessBasedEnumeration, error) { + vals := map[string]SmbAccessBasedEnumeration{ + "disabled": SmbAccessBasedEnumerationDisabled, + "enabled": SmbAccessBasedEnumerationEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SmbAccessBasedEnumeration(input) + return &out, nil +} + +type SmbNonBrowsable string + +const ( + SmbNonBrowsableDisabled SmbNonBrowsable = "Disabled" + SmbNonBrowsableEnabled SmbNonBrowsable = "Enabled" +) + +func PossibleValuesForSmbNonBrowsable() []string { + return []string{ + string(SmbNonBrowsableDisabled), + string(SmbNonBrowsableEnabled), + } +} + +func (s *SmbNonBrowsable) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSmbNonBrowsable(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", 
decoded, err) + } + *s = *out + return nil +} + +func parseSmbNonBrowsable(input string) (*SmbNonBrowsable, error) { + vals := map[string]SmbNonBrowsable{ + "disabled": SmbNonBrowsableDisabled, + "enabled": SmbNonBrowsableEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SmbNonBrowsable(input) + return &out, nil +} + +type VolumeStorageToNetworkProximity string + +const ( + VolumeStorageToNetworkProximityAcrossTTwo VolumeStorageToNetworkProximity = "AcrossT2" + VolumeStorageToNetworkProximityDefault VolumeStorageToNetworkProximity = "Default" + VolumeStorageToNetworkProximityTOne VolumeStorageToNetworkProximity = "T1" + VolumeStorageToNetworkProximityTTwo VolumeStorageToNetworkProximity = "T2" +) + +func PossibleValuesForVolumeStorageToNetworkProximity() []string { + return []string{ + string(VolumeStorageToNetworkProximityAcrossTTwo), + string(VolumeStorageToNetworkProximityDefault), + string(VolumeStorageToNetworkProximityTOne), + string(VolumeStorageToNetworkProximityTTwo), + } +} + +func (s *VolumeStorageToNetworkProximity) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseVolumeStorageToNetworkProximity(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseVolumeStorageToNetworkProximity(input string) (*VolumeStorageToNetworkProximity, error) { + vals := map[string]VolumeStorageToNetworkProximity{ + "acrosst2": VolumeStorageToNetworkProximityAcrossTTwo, + "default": VolumeStorageToNetworkProximityDefault, + "t1": VolumeStorageToNetworkProximityTOne, + "t2": VolumeStorageToNetworkProximityTTwo, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := 
VolumeStorageToNetworkProximity(input) + return &out, nil +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/id_snapshotpolicy.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/id_snapshotpolicy.go new file mode 100644 index 00000000000..c366a171e15 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/id_snapshotpolicy.go @@ -0,0 +1,139 @@ +package snapshotpolicylistvolumes + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&SnapshotPolicyId{}) +} + +var _ resourceids.ResourceId = &SnapshotPolicyId{} + +// SnapshotPolicyId is a struct representing the Resource ID for a Snapshot Policy +type SnapshotPolicyId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + SnapshotPolicyName string +} + +// NewSnapshotPolicyID returns a new SnapshotPolicyId struct +func NewSnapshotPolicyID(subscriptionId string, resourceGroupName string, netAppAccountName string, snapshotPolicyName string) SnapshotPolicyId { + return SnapshotPolicyId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + SnapshotPolicyName: snapshotPolicyName, + } +} + +// ParseSnapshotPolicyID parses 'input' into a SnapshotPolicyId +func ParseSnapshotPolicyID(input string) (*SnapshotPolicyId, error) { + parser := resourceids.NewParserFromResourceIdType(&SnapshotPolicyId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SnapshotPolicyId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// 
ParseSnapshotPolicyIDInsensitively parses 'input' case-insensitively into a SnapshotPolicyId +// note: this method should only be used for API response data and not user input +func ParseSnapshotPolicyIDInsensitively(input string) (*SnapshotPolicyId, error) { + parser := resourceids.NewParserFromResourceIdType(&SnapshotPolicyId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SnapshotPolicyId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *SnapshotPolicyId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.SnapshotPolicyName, ok = input.Parsed["snapshotPolicyName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "snapshotPolicyName", input) + } + + return nil +} + +// ValidateSnapshotPolicyID checks that 'input' can be parsed as a Snapshot Policy ID +func ValidateSnapshotPolicyID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseSnapshotPolicyID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Snapshot Policy ID +func (id SnapshotPolicyId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/snapshotPolicies/%s" + return fmt.Sprintf(fmtString, 
id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.SnapshotPolicyName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Snapshot Policy ID +func (id SnapshotPolicyId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticSnapshotPolicies", "snapshotPolicies", "snapshotPolicies"), + resourceids.UserSpecifiedSegment("snapshotPolicyName", "snapshotPolicyName"), + } +} + +// String returns a human-readable description of this Snapshot Policy ID +func (id SnapshotPolicyId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Snapshot Policy Name: %q", id.SnapshotPolicyName), + } + return fmt.Sprintf("Snapshot Policy (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/id_snapshotpolicy_test.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/id_snapshotpolicy_test.go new file mode 100644 index 00000000000..8dad1f1572b --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/id_snapshotpolicy_test.go @@ -0,0 
+1,327 @@ +package snapshotpolicylistvolumes + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &SnapshotPolicyId{} + +func TestNewSnapshotPolicyID(t *testing.T) { + id := NewSnapshotPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "snapshotPolicyName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.SnapshotPolicyName != "snapshotPolicyName" { + t.Fatalf("Expected %q but got %q for Segment 'SnapshotPolicyName'", id.SnapshotPolicyName, "snapshotPolicyName") + } +} + +func TestFormatSnapshotPolicyID(t *testing.T) { + actual := NewSnapshotPolicyID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "snapshotPolicyName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies/snapshotPolicyName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseSnapshotPolicyID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SnapshotPolicyId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + 
Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies/snapshotPolicyName", + Expected: &SnapshotPolicyId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + SnapshotPolicyName: "snapshotPolicyName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies/snapshotPolicyName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSnapshotPolicyID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.SnapshotPolicyName != v.Expected.SnapshotPolicyName { + t.Fatalf("Expected %q but got %q for SnapshotPolicyName", v.Expected.SnapshotPolicyName, actual.SnapshotPolicyName) + } + + } +} + +func TestParseSnapshotPolicyIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SnapshotPolicyId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is 
insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/sNaPsHoTpOlIcIeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies/snapshotPolicyName", + Expected: &SnapshotPolicyId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + SnapshotPolicyName: "snapshotPolicyName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/snapshotPolicies/snapshotPolicyName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/sNaPsHoTpOlIcIeS/sNaPsHoTpOlIcYnAmE", + Expected: &SnapshotPolicyId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + SnapshotPolicyName: "sNaPsHoTpOlIcYnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/sNaPsHoTpOlIcIeS/sNaPsHoTpOlIcYnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSnapshotPolicyIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.SnapshotPolicyName != v.Expected.SnapshotPolicyName { + t.Fatalf("Expected %q but got %q for SnapshotPolicyName", v.Expected.SnapshotPolicyName, actual.SnapshotPolicyName) + } + + } +} + +func TestSegmentsForSnapshotPolicyId(t *testing.T) { + segments := SnapshotPolicyId{}.Segments() + if len(segments) == 0 { + t.Fatalf("SnapshotPolicyId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/method_snapshotpolicieslistvolumes.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/method_snapshotpolicieslistvolumes.go new file mode 100644 index 
00000000000..88466bae80e --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/method_snapshotpolicieslistvolumes.go @@ -0,0 +1,54 @@ +package snapshotpolicylistvolumes + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SnapshotPoliciesListVolumesOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *SnapshotPolicyVolumeList +} + +// SnapshotPoliciesListVolumes ... +func (c SnapshotPolicyListVolumesClient) SnapshotPoliciesListVolumes(ctx context.Context, id SnapshotPolicyId) (result SnapshotPoliciesListVolumesOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/volumes", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model SnapshotPolicyVolumeList + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_destinationreplication.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_destinationreplication.go new file mode 100644 index 00000000000..c4f17a989ac --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_destinationreplication.go @@ -0,0 +1,11 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DestinationReplication struct { + Region *string `json:"region,omitempty"` + ReplicationType *ReplicationType `json:"replicationType,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` + Zone *string `json:"zone,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_exportpolicyrule.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_exportpolicyrule.go new file mode 100644 index 00000000000..dd6bb7a20fc --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_exportpolicyrule.go @@ -0,0 +1,22 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ExportPolicyRule struct { + AllowedClients *string `json:"allowedClients,omitempty"` + ChownMode *ChownMode `json:"chownMode,omitempty"` + Cifs *bool `json:"cifs,omitempty"` + HasRootAccess *bool `json:"hasRootAccess,omitempty"` + Kerberos5ReadOnly *bool `json:"kerberos5ReadOnly,omitempty"` + Kerberos5ReadWrite *bool `json:"kerberos5ReadWrite,omitempty"` + Kerberos5iReadOnly *bool `json:"kerberos5iReadOnly,omitempty"` + Kerberos5iReadWrite *bool `json:"kerberos5iReadWrite,omitempty"` + Kerberos5pReadOnly *bool `json:"kerberos5pReadOnly,omitempty"` + Kerberos5pReadWrite *bool `json:"kerberos5pReadWrite,omitempty"` + Nfsv3 *bool `json:"nfsv3,omitempty"` + Nfsv41 *bool `json:"nfsv41,omitempty"` + RuleIndex *int64 `json:"ruleIndex,omitempty"` + UnixReadOnly *bool `json:"unixReadOnly,omitempty"` + UnixReadWrite *bool `json:"unixReadWrite,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_mounttargetproperties.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_mounttargetproperties.go new file mode 100644 index 
00000000000..49f9f6f131a --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_mounttargetproperties.go @@ -0,0 +1,11 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MountTargetProperties struct { + FileSystemId string `json:"fileSystemId"` + IPAddress *string `json:"ipAddress,omitempty"` + MountTargetId *string `json:"mountTargetId,omitempty"` + SmbServerFqdn *string `json:"smbServerFqdn,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_placementkeyvaluepairs.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_placementkeyvaluepairs.go new file mode 100644 index 00000000000..0d5756a7089 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_placementkeyvaluepairs.go @@ -0,0 +1,9 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PlacementKeyValuePairs struct { + Key string `json:"key"` + Value string `json:"value"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_remotepath.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_remotepath.go new file mode 100644 index 00000000000..15c1c316b42 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_remotepath.go @@ -0,0 +1,10 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type RemotePath struct { + ExternalHostName string `json:"externalHostName"` + ServerName string `json:"serverName"` + VolumeName string `json:"volumeName"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_replicationobject.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_replicationobject.go new file mode 100644 index 00000000000..7367cde8bf9 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_replicationobject.go @@ -0,0 +1,14 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ReplicationObject struct { + DestinationReplications *[]DestinationReplication `json:"destinationReplications,omitempty"` + EndpointType *EndpointType `json:"endpointType,omitempty"` + RemotePath *RemotePath `json:"remotePath,omitempty"` + RemoteVolumeRegion *string `json:"remoteVolumeRegion,omitempty"` + RemoteVolumeResourceId *string `json:"remoteVolumeResourceId,omitempty"` + ReplicationId *string `json:"replicationId,omitempty"` + ReplicationSchedule *ReplicationSchedule `json:"replicationSchedule,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_snapshotpolicyvolumelist.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_snapshotpolicyvolumelist.go new file mode 100644 index 00000000000..2d14e9596fe --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_snapshotpolicyvolumelist.go @@ -0,0 +1,8 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SnapshotPolicyVolumeList struct { + Value *[]Volume `json:"value,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volume.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volume.go new file mode 100644 index 00000000000..36417f52891 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volume.go @@ -0,0 +1,21 @@ +package snapshotpolicylistvolumes + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" + "github.com/hashicorp/go-azure-helpers/resourcemanager/zones" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type Volume struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Location string `json:"location"` + Name *string `json:"name,omitempty"` + Properties VolumeProperties `json:"properties"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` + Zones *zones.Schema `json:"zones,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumebackupproperties.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumebackupproperties.go new file mode 100644 index 00000000000..be2a40ea08d --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumebackupproperties.go @@ -0,0 +1,10 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeBackupProperties struct { + BackupPolicyId *string `json:"backupPolicyId,omitempty"` + BackupVaultId *string `json:"backupVaultId,omitempty"` + PolicyEnforced *bool `json:"policyEnforced,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumeproperties.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumeproperties.go new file mode 100644 index 00000000000..8f2e04be62e --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumeproperties.go @@ -0,0 +1,65 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeProperties struct { + AcceptGrowCapacityPoolForShortTermCloneSplit *AcceptGrowCapacityPoolForShortTermCloneSplit `json:"acceptGrowCapacityPoolForShortTermCloneSplit,omitempty"` + ActualThroughputMibps *float64 `json:"actualThroughputMibps,omitempty"` + AvsDataStore *AvsDataStore `json:"avsDataStore,omitempty"` + BackupId *string `json:"backupId,omitempty"` + BaremetalTenantId *string `json:"baremetalTenantId,omitempty"` + CapacityPoolResourceId *string `json:"capacityPoolResourceId,omitempty"` + CloneProgress *int64 `json:"cloneProgress,omitempty"` + CoolAccess *bool `json:"coolAccess,omitempty"` + CoolAccessRetrievalPolicy *CoolAccessRetrievalPolicy `json:"coolAccessRetrievalPolicy,omitempty"` + CoolAccessTieringPolicy *CoolAccessTieringPolicy `json:"coolAccessTieringPolicy,omitempty"` + CoolnessPeriod *int64 `json:"coolnessPeriod,omitempty"` + CreationToken string `json:"creationToken"` + DataProtection *VolumePropertiesDataProtection `json:"dataProtection,omitempty"` + DataStoreResourceId *[]string `json:"dataStoreResourceId,omitempty"` + DefaultGroupQuotaInKiBs *int64 `json:"defaultGroupQuotaInKiBs,omitempty"` + DefaultUserQuotaInKiBs *int64 `json:"defaultUserQuotaInKiBs,omitempty"` + 
DeleteBaseSnapshot *bool `json:"deleteBaseSnapshot,omitempty"` + EffectiveNetworkFeatures *NetworkFeatures `json:"effectiveNetworkFeatures,omitempty"` + EnableSubvolumes *EnableSubvolumes `json:"enableSubvolumes,omitempty"` + Encrypted *bool `json:"encrypted,omitempty"` + EncryptionKeySource *EncryptionKeySource `json:"encryptionKeySource,omitempty"` + ExportPolicy *VolumePropertiesExportPolicy `json:"exportPolicy,omitempty"` + FileAccessLogs *FileAccessLogs `json:"fileAccessLogs,omitempty"` + FileSystemId *string `json:"fileSystemId,omitempty"` + InheritedSizeInBytes *int64 `json:"inheritedSizeInBytes,omitempty"` + IsDefaultQuotaEnabled *bool `json:"isDefaultQuotaEnabled,omitempty"` + IsLargeVolume *bool `json:"isLargeVolume,omitempty"` + IsRestoring *bool `json:"isRestoring,omitempty"` + KerberosEnabled *bool `json:"kerberosEnabled,omitempty"` + KeyVaultPrivateEndpointResourceId *string `json:"keyVaultPrivateEndpointResourceId,omitempty"` + LdapEnabled *bool `json:"ldapEnabled,omitempty"` + MaximumNumberOfFiles *int64 `json:"maximumNumberOfFiles,omitempty"` + MountTargets *[]MountTargetProperties `json:"mountTargets,omitempty"` + NetworkFeatures *NetworkFeatures `json:"networkFeatures,omitempty"` + NetworkSiblingSetId *string `json:"networkSiblingSetId,omitempty"` + OriginatingResourceId *string `json:"originatingResourceId,omitempty"` + PlacementRules *[]PlacementKeyValuePairs `json:"placementRules,omitempty"` + ProtocolTypes *[]string `json:"protocolTypes,omitempty"` + ProvisionedAvailabilityZone *string `json:"provisionedAvailabilityZone,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + ProximityPlacementGroup *string `json:"proximityPlacementGroup,omitempty"` + SecurityStyle *SecurityStyle `json:"securityStyle,omitempty"` + ServiceLevel *ServiceLevel `json:"serviceLevel,omitempty"` + SmbAccessBasedEnumeration *SmbAccessBasedEnumeration `json:"smbAccessBasedEnumeration,omitempty"` + SmbContinuouslyAvailable *bool 
`json:"smbContinuouslyAvailable,omitempty"` + SmbEncryption *bool `json:"smbEncryption,omitempty"` + SmbNonBrowsable *SmbNonBrowsable `json:"smbNonBrowsable,omitempty"` + SnapshotDirectoryVisible *bool `json:"snapshotDirectoryVisible,omitempty"` + SnapshotId *string `json:"snapshotId,omitempty"` + StorageToNetworkProximity *VolumeStorageToNetworkProximity `json:"storageToNetworkProximity,omitempty"` + SubnetId string `json:"subnetId"` + T2Network *string `json:"t2Network,omitempty"` + ThroughputMibps *float64 `json:"throughputMibps,omitempty"` + UnixPermissions *string `json:"unixPermissions,omitempty"` + UsageThreshold int64 `json:"usageThreshold"` + VolumeGroupName *string `json:"volumeGroupName,omitempty"` + VolumeSpecName *string `json:"volumeSpecName,omitempty"` + VolumeType *string `json:"volumeType,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumepropertiesdataprotection.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumepropertiesdataprotection.go new file mode 100644 index 00000000000..bf5ff10761d --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumepropertiesdataprotection.go @@ -0,0 +1,11 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumePropertiesDataProtection struct { + Backup *VolumeBackupProperties `json:"backup,omitempty"` + Replication *ReplicationObject `json:"replication,omitempty"` + Snapshot *VolumeSnapshotProperties `json:"snapshot,omitempty"` + VolumeRelocation *VolumeRelocationProperties `json:"volumeRelocation,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumepropertiesexportpolicy.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumepropertiesexportpolicy.go new file mode 100644 index 00000000000..1ad178d6e25 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumepropertiesexportpolicy.go @@ -0,0 +1,8 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumePropertiesExportPolicy struct { + Rules *[]ExportPolicyRule `json:"rules,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumerelocationproperties.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumerelocationproperties.go new file mode 100644 index 00000000000..26ffc8f0697 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumerelocationproperties.go @@ -0,0 +1,9 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeRelocationProperties struct { + ReadyToBeFinalized *bool `json:"readyToBeFinalized,omitempty"` + RelocationRequested *bool `json:"relocationRequested,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumesnapshotproperties.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumesnapshotproperties.go new file mode 100644 index 00000000000..e4915375144 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/model_volumesnapshotproperties.go @@ -0,0 +1,8 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeSnapshotProperties struct { + SnapshotPolicyId *string `json:"snapshotPolicyId,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/version.go b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/version.go new file mode 100644 index 00000000000..c795921ab08 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshotpolicylistvolumes/version.go @@ -0,0 +1,10 @@ +package snapshotpolicylistvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/snapshotpolicylistvolumes/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/README.md b/resource-manager/netapp/2025-06-01/snapshots/README.md new file mode 100644 index 00000000000..ef3cf291bff --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/README.md @@ -0,0 +1,111 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/snapshots` Documentation + +The `snapshots` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/snapshots" +``` + + +### Client Initialization + +```go +client := snapshots.NewSnapshotsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `SnapshotsClient.Create` + +```go +ctx := context.TODO() +id := snapshots.NewSnapshotID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "snapshotName") + +payload := snapshots.Snapshot{ + // ... 
+} + + +if err := client.CreateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `SnapshotsClient.Delete` + +```go +ctx := context.TODO() +id := snapshots.NewSnapshotID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "snapshotName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `SnapshotsClient.Get` + +```go +ctx := context.TODO() +id := snapshots.NewSnapshotID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "snapshotName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `SnapshotsClient.List` + +```go +ctx := context.TODO() +id := snapshots.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +read, err := client.List(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `SnapshotsClient.RestoreFiles` + +```go +ctx := context.TODO() +id := snapshots.NewSnapshotID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "snapshotName") + +payload := snapshots.SnapshotRestoreFiles{ + // ... 
+} + + +if err := client.RestoreFilesThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `SnapshotsClient.Update` + +```go +ctx := context.TODO() +id := snapshots.NewSnapshotID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "snapshotName") +var payload interface{} + +if err := client.UpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/snapshots/client.go b/resource-manager/netapp/2025-06-01/snapshots/client.go new file mode 100644 index 00000000000..678a64470aa --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/client.go @@ -0,0 +1,26 @@ +package snapshots + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SnapshotsClient struct { + Client *resourcemanager.Client +} + +func NewSnapshotsClientWithBaseURI(sdkApi sdkEnv.Api) (*SnapshotsClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "snapshots", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating SnapshotsClient: %+v", err) + } + + return &SnapshotsClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/id_snapshot.go b/resource-manager/netapp/2025-06-01/snapshots/id_snapshot.go new file mode 100644 index 00000000000..2fe36d4f5a9 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/id_snapshot.go @@ -0,0 +1,157 @@ +package snapshots + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&SnapshotId{}) +} + +var _ resourceids.ResourceId = &SnapshotId{} + +// SnapshotId is a struct representing the Resource ID for a Snapshot +type SnapshotId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string + SnapshotName string +} + +// NewSnapshotID returns a new SnapshotId struct +func NewSnapshotID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string, snapshotName string) SnapshotId { + return SnapshotId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + SnapshotName: snapshotName, + } +} + +// ParseSnapshotID parses 'input' into a SnapshotId +func ParseSnapshotID(input string) (*SnapshotId, error) { + parser := resourceids.NewParserFromResourceIdType(&SnapshotId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SnapshotId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseSnapshotIDInsensitively parses 'input' case-insensitively into a SnapshotId +// note: this method should only be used for API response data and not user input +func ParseSnapshotIDInsensitively(input string) (*SnapshotId, error) { + parser := resourceids.NewParserFromResourceIdType(&SnapshotId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SnapshotId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *SnapshotId) FromParseResult(input resourceids.ParseResult) error { + var 
ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + if id.SnapshotName, ok = input.Parsed["snapshotName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "snapshotName", input) + } + + return nil +} + +// ValidateSnapshotID checks that 'input' can be parsed as a Snapshot ID +func ValidateSnapshotID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseSnapshotID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Snapshot ID +func (id SnapshotId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s/snapshots/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName, id.SnapshotName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Snapshot ID +func (id SnapshotId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + 
resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + resourceids.StaticSegment("staticSnapshots", "snapshots", "snapshots"), + resourceids.UserSpecifiedSegment("snapshotName", "snapshotName"), + } +} + +// String returns a human-readable description of this Snapshot ID +func (id SnapshotId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + fmt.Sprintf("Snapshot Name: %q", id.SnapshotName), + } + return fmt.Sprintf("Snapshot (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/id_snapshot_test.go b/resource-manager/netapp/2025-06-01/snapshots/id_snapshot_test.go new file mode 100644 index 00000000000..1aae8e6ed5c --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/id_snapshot_test.go @@ -0,0 +1,417 @@ +package snapshots + +import ( + "testing" + + 
"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &SnapshotId{} + +func TestNewSnapshotID(t *testing.T) { + id := NewSnapshotID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "snapshotName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } + + if id.SnapshotName != "snapshotName" { + t.Fatalf("Expected %q but got %q for Segment 'SnapshotName'", id.SnapshotName, "snapshotName") + } +} + +func TestFormatSnapshotID(t *testing.T) { + actual := NewSnapshotID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "snapshotName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/snapshots/snapshotName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but 
got %q", expected, actual) + } +} + +func TestParseSnapshotID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SnapshotId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/snapshots", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/snapshots/snapshotName", + Expected: &SnapshotId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + SnapshotName: "snapshotName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/snapshots/snapshotName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSnapshotID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", 
v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + if actual.SnapshotName != v.Expected.SnapshotName { + t.Fatalf("Expected %q but got %q for SnapshotName", v.Expected.SnapshotName, actual.SnapshotName) + } + + } +} + +func TestParseSnapshotIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SnapshotId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // 
Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + 
{ + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/snapshots", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/sNaPsHoTs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/snapshots/snapshotName", + Expected: &SnapshotId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + SnapshotName: "snapshotName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/snapshots/snapshotName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/sNaPsHoTs/sNaPsHoTnAmE", + Expected: &SnapshotId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + SnapshotName: "sNaPsHoTnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra 
segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/sNaPsHoTs/sNaPsHoTnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSnapshotIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + if actual.SnapshotName != v.Expected.SnapshotName { + t.Fatalf("Expected %q but got %q for SnapshotName", v.Expected.SnapshotName, actual.SnapshotName) + } + + } +} + +func TestSegmentsForSnapshotId(t *testing.T) { + segments := SnapshotId{}.Segments() + if len(segments) == 0 { + t.Fatalf("SnapshotId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + 
t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/id_volume.go b/resource-manager/netapp/2025-06-01/snapshots/id_volume.go new file mode 100644 index 00000000000..dcd1ab5e98b --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/id_volume.go @@ -0,0 +1,148 @@ +package snapshots + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: 
this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, 
id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/id_volume_test.go b/resource-manager/netapp/2025-06-01/snapshots/id_volume_test.go new file mode 100644 index 00000000000..010624ba420 --- /dev/null +++ 
b/resource-manager/netapp/2025-06-01/snapshots/id_volume_test.go @@ -0,0 +1,372 @@ +package snapshots + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + 
testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: 
true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t 
*testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/method_create.go 
b/resource-manager/netapp/2025-06-01/snapshots/method_create.go new file mode 100644 index 00000000000..6d6b9a4fbe2 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/method_create.go @@ -0,0 +1,75 @@ +package snapshots + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *Snapshot +} + +// Create ... +func (c SnapshotsClient) Create(ctx context.Context, id SnapshotId, input Snapshot) (result CreateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateThenPoll performs Create then polls until it's completed +func (c SnapshotsClient) CreateThenPoll(ctx context.Context, id SnapshotId, input Snapshot) error { + result, err := c.Create(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Create: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Create: %+v", 
err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/method_delete.go b/resource-manager/netapp/2025-06-01/snapshots/method_delete.go new file mode 100644 index 00000000000..ef16480f860 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/method_delete.go @@ -0,0 +1,71 @@ +package snapshots + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c SnapshotsClient) Delete(ctx context.Context, id SnapshotId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c SnapshotsClient) DeleteThenPoll(ctx context.Context, id SnapshotId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: 
%+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/method_get.go b/resource-manager/netapp/2025-06-01/snapshots/method_get.go new file mode 100644 index 00000000000..68b676f0115 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/method_get.go @@ -0,0 +1,53 @@ +package snapshots + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *Snapshot +} + +// Get ... +func (c SnapshotsClient) Get(ctx context.Context, id SnapshotId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model Snapshot + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/method_list.go b/resource-manager/netapp/2025-06-01/snapshots/method_list.go new file mode 100644 index 00000000000..77c6da22159 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/method_list.go @@ -0,0 +1,54 @@ +package snapshots + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *SnapshotsList +} + +// List ... +func (c SnapshotsClient) List(ctx context.Context, id VolumeId) (result ListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/snapshots", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model SnapshotsList + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/method_restorefiles.go b/resource-manager/netapp/2025-06-01/snapshots/method_restorefiles.go new file mode 100644 index 00000000000..685be44a5ff --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/method_restorefiles.go @@ -0,0 +1,74 @@ +package snapshots + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RestoreFilesOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// RestoreFiles ... 
+func (c SnapshotsClient) RestoreFiles(ctx context.Context, id SnapshotId, input SnapshotRestoreFiles) (result RestoreFilesOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/restoreFiles", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// RestoreFilesThenPoll performs RestoreFiles then polls until it's completed +func (c SnapshotsClient) RestoreFilesThenPoll(ctx context.Context, id SnapshotId, input SnapshotRestoreFiles) error { + result, err := c.RestoreFiles(ctx, id, input) + if err != nil { + return fmt.Errorf("performing RestoreFiles: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after RestoreFiles: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/method_update.go b/resource-manager/netapp/2025-06-01/snapshots/method_update.go new file mode 100644 index 00000000000..1e85e294ee5 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/method_update.go @@ -0,0 +1,75 @@ +package snapshots + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type UpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *Snapshot +} + +// Update ... +func (c SnapshotsClient) Update(ctx context.Context, id SnapshotId, input interface{}) (result UpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UpdateThenPoll performs Update then polls until it's completed +func (c SnapshotsClient) UpdateThenPoll(ctx context.Context, id SnapshotId, input interface{}) error { + result, err := c.Update(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Update: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Update: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/model_snapshot.go b/resource-manager/netapp/2025-06-01/snapshots/model_snapshot.go new file mode 100644 index 00000000000..fe53e779a4c --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/model_snapshot.go @@ -0,0 +1,17 @@ +package snapshots + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type Snapshot struct { + Id *string `json:"id,omitempty"` + Location string `json:"location"` + Name *string `json:"name,omitempty"` + Properties *SnapshotProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/model_snapshotproperties.go b/resource-manager/netapp/2025-06-01/snapshots/model_snapshotproperties.go new file mode 100644 index 00000000000..fe60537eaaa --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/model_snapshotproperties.go @@ -0,0 +1,28 @@ +package snapshots + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SnapshotProperties struct { + Created *string `json:"created,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + SnapshotId *string `json:"snapshotId,omitempty"` +} + +func (o *SnapshotProperties) GetCreatedAsTime() (*time.Time, error) { + if o.Created == nil { + return nil, nil + } + return dates.ParseAsFormat(o.Created, "2006-01-02T15:04:05Z07:00") +} + +func (o *SnapshotProperties) SetCreatedAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.Created = &formatted +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/model_snapshotrestorefiles.go b/resource-manager/netapp/2025-06-01/snapshots/model_snapshotrestorefiles.go new file mode 100644 index 00000000000..de80d971dcf --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/model_snapshotrestorefiles.go @@ -0,0 +1,9 @@ +package snapshots + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SnapshotRestoreFiles struct { + DestinationPath *string `json:"destinationPath,omitempty"` + FilePaths []string `json:"filePaths"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/model_snapshotslist.go b/resource-manager/netapp/2025-06-01/snapshots/model_snapshotslist.go new file mode 100644 index 00000000000..bf283b0d2ac --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/model_snapshotslist.go @@ -0,0 +1,8 @@ +package snapshots + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SnapshotsList struct { + Value *[]Snapshot `json:"value,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/snapshots/version.go b/resource-manager/netapp/2025-06-01/snapshots/version.go new file mode 100644 index 00000000000..aed4ca9af96 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/snapshots/version.go @@ -0,0 +1,10 @@ +package snapshots + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/snapshots/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/README.md b/resource-manager/netapp/2025-06-01/splitclonevolume/README.md new file mode 100644 index 00000000000..e30ee3938f3 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/README.md @@ -0,0 +1,32 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/splitclonevolume` Documentation + +The `splitclonevolume` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/splitclonevolume" +``` + + +### Client Initialization + +```go +client := splitclonevolume.NewSplitCloneVolumeClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `SplitCloneVolumeClient.VolumesSplitCloneFromParent` + +```go +ctx := context.TODO() +id := splitclonevolume.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +if err := client.VolumesSplitCloneFromParentThenPoll(ctx, id); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/client.go b/resource-manager/netapp/2025-06-01/splitclonevolume/client.go new file mode 100644 index 00000000000..a7cf60f8da4 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/client.go @@ -0,0 +1,26 @@ +package splitclonevolume + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SplitCloneVolumeClient struct { + Client *resourcemanager.Client +} + +func NewSplitCloneVolumeClientWithBaseURI(sdkApi sdkEnv.Api) (*SplitCloneVolumeClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "splitclonevolume", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating SplitCloneVolumeClient: %+v", err) + } + + return &SplitCloneVolumeClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/constants.go b/resource-manager/netapp/2025-06-01/splitclonevolume/constants.go new file mode 100644 index 00000000000..908bb11c3d1 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/constants.go @@ -0,0 +1,734 @@ +package splitclonevolume + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AcceptGrowCapacityPoolForShortTermCloneSplit string + +const ( + AcceptGrowCapacityPoolForShortTermCloneSplitAccepted AcceptGrowCapacityPoolForShortTermCloneSplit = "Accepted" + AcceptGrowCapacityPoolForShortTermCloneSplitDeclined AcceptGrowCapacityPoolForShortTermCloneSplit = "Declined" +) + +func PossibleValuesForAcceptGrowCapacityPoolForShortTermCloneSplit() []string { + return []string{ + string(AcceptGrowCapacityPoolForShortTermCloneSplitAccepted), + string(AcceptGrowCapacityPoolForShortTermCloneSplitDeclined), + } +} + +func (s *AcceptGrowCapacityPoolForShortTermCloneSplit) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAcceptGrowCapacityPoolForShortTermCloneSplit(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAcceptGrowCapacityPoolForShortTermCloneSplit(input string) 
(*AcceptGrowCapacityPoolForShortTermCloneSplit, error) { + vals := map[string]AcceptGrowCapacityPoolForShortTermCloneSplit{ + "accepted": AcceptGrowCapacityPoolForShortTermCloneSplitAccepted, + "declined": AcceptGrowCapacityPoolForShortTermCloneSplitDeclined, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AcceptGrowCapacityPoolForShortTermCloneSplit(input) + return &out, nil +} + +type AvsDataStore string + +const ( + AvsDataStoreDisabled AvsDataStore = "Disabled" + AvsDataStoreEnabled AvsDataStore = "Enabled" +) + +func PossibleValuesForAvsDataStore() []string { + return []string{ + string(AvsDataStoreDisabled), + string(AvsDataStoreEnabled), + } +} + +func (s *AvsDataStore) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAvsDataStore(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAvsDataStore(input string) (*AvsDataStore, error) { + vals := map[string]AvsDataStore{ + "disabled": AvsDataStoreDisabled, + "enabled": AvsDataStoreEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AvsDataStore(input) + return &out, nil +} + +type ChownMode string + +const ( + ChownModeRestricted ChownMode = "Restricted" + ChownModeUnrestricted ChownMode = "Unrestricted" +) + +func PossibleValuesForChownMode() []string { + return []string{ + string(ChownModeRestricted), + string(ChownModeUnrestricted), + } +} + +func (s *ChownMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseChownMode(decoded) + if err != nil { + return 
fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseChownMode(input string) (*ChownMode, error) { + vals := map[string]ChownMode{ + "restricted": ChownModeRestricted, + "unrestricted": ChownModeUnrestricted, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ChownMode(input) + return &out, nil +} + +type CoolAccessRetrievalPolicy string + +const ( + CoolAccessRetrievalPolicyDefault CoolAccessRetrievalPolicy = "Default" + CoolAccessRetrievalPolicyNever CoolAccessRetrievalPolicy = "Never" + CoolAccessRetrievalPolicyOnRead CoolAccessRetrievalPolicy = "OnRead" +) + +func PossibleValuesForCoolAccessRetrievalPolicy() []string { + return []string{ + string(CoolAccessRetrievalPolicyDefault), + string(CoolAccessRetrievalPolicyNever), + string(CoolAccessRetrievalPolicyOnRead), + } +} + +func (s *CoolAccessRetrievalPolicy) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCoolAccessRetrievalPolicy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCoolAccessRetrievalPolicy(input string) (*CoolAccessRetrievalPolicy, error) { + vals := map[string]CoolAccessRetrievalPolicy{ + "default": CoolAccessRetrievalPolicyDefault, + "never": CoolAccessRetrievalPolicyNever, + "onread": CoolAccessRetrievalPolicyOnRead, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CoolAccessRetrievalPolicy(input) + return &out, nil +} + +type CoolAccessTieringPolicy string + +const ( + CoolAccessTieringPolicyAuto CoolAccessTieringPolicy = "Auto" + CoolAccessTieringPolicySnapshotOnly CoolAccessTieringPolicy = "SnapshotOnly" +) + +func 
PossibleValuesForCoolAccessTieringPolicy() []string { + return []string{ + string(CoolAccessTieringPolicyAuto), + string(CoolAccessTieringPolicySnapshotOnly), + } +} + +func (s *CoolAccessTieringPolicy) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCoolAccessTieringPolicy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCoolAccessTieringPolicy(input string) (*CoolAccessTieringPolicy, error) { + vals := map[string]CoolAccessTieringPolicy{ + "auto": CoolAccessTieringPolicyAuto, + "snapshotonly": CoolAccessTieringPolicySnapshotOnly, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CoolAccessTieringPolicy(input) + return &out, nil +} + +type EnableSubvolumes string + +const ( + EnableSubvolumesDisabled EnableSubvolumes = "Disabled" + EnableSubvolumesEnabled EnableSubvolumes = "Enabled" +) + +func PossibleValuesForEnableSubvolumes() []string { + return []string{ + string(EnableSubvolumesDisabled), + string(EnableSubvolumesEnabled), + } +} + +func (s *EnableSubvolumes) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseEnableSubvolumes(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseEnableSubvolumes(input string) (*EnableSubvolumes, error) { + vals := map[string]EnableSubvolumes{ + "disabled": EnableSubvolumesDisabled, + "enabled": EnableSubvolumesEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := EnableSubvolumes(input) + return &out, nil +} + 
+type EncryptionKeySource string + +const ( + EncryptionKeySourceMicrosoftPointKeyVault EncryptionKeySource = "Microsoft.KeyVault" + EncryptionKeySourceMicrosoftPointNetApp EncryptionKeySource = "Microsoft.NetApp" +) + +func PossibleValuesForEncryptionKeySource() []string { + return []string{ + string(EncryptionKeySourceMicrosoftPointKeyVault), + string(EncryptionKeySourceMicrosoftPointNetApp), + } +} + +func (s *EncryptionKeySource) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseEncryptionKeySource(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseEncryptionKeySource(input string) (*EncryptionKeySource, error) { + vals := map[string]EncryptionKeySource{ + "microsoft.keyvault": EncryptionKeySourceMicrosoftPointKeyVault, + "microsoft.netapp": EncryptionKeySourceMicrosoftPointNetApp, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := EncryptionKeySource(input) + return &out, nil +} + +type EndpointType string + +const ( + EndpointTypeDst EndpointType = "dst" + EndpointTypeSrc EndpointType = "src" +) + +func PossibleValuesForEndpointType() []string { + return []string{ + string(EndpointTypeDst), + string(EndpointTypeSrc), + } +} + +func (s *EndpointType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseEndpointType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseEndpointType(input string) (*EndpointType, error) { + vals := map[string]EndpointType{ + "dst": EndpointTypeDst, + "src": EndpointTypeSrc, + } + if v, ok := vals[strings.ToLower(input)]; ok { + 
return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := EndpointType(input) + return &out, nil +} + +type FileAccessLogs string + +const ( + FileAccessLogsDisabled FileAccessLogs = "Disabled" + FileAccessLogsEnabled FileAccessLogs = "Enabled" +) + +func PossibleValuesForFileAccessLogs() []string { + return []string{ + string(FileAccessLogsDisabled), + string(FileAccessLogsEnabled), + } +} + +func (s *FileAccessLogs) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseFileAccessLogs(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseFileAccessLogs(input string) (*FileAccessLogs, error) { + vals := map[string]FileAccessLogs{ + "disabled": FileAccessLogsDisabled, + "enabled": FileAccessLogsEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := FileAccessLogs(input) + return &out, nil +} + +type NetworkFeatures string + +const ( + NetworkFeaturesBasic NetworkFeatures = "Basic" + NetworkFeaturesBasicStandard NetworkFeatures = "Basic_Standard" + NetworkFeaturesStandard NetworkFeatures = "Standard" + NetworkFeaturesStandardBasic NetworkFeatures = "Standard_Basic" +) + +func PossibleValuesForNetworkFeatures() []string { + return []string{ + string(NetworkFeaturesBasic), + string(NetworkFeaturesBasicStandard), + string(NetworkFeaturesStandard), + string(NetworkFeaturesStandardBasic), + } +} + +func (s *NetworkFeatures) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNetworkFeatures(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + 
+func parseNetworkFeatures(input string) (*NetworkFeatures, error) { + vals := map[string]NetworkFeatures{ + "basic": NetworkFeaturesBasic, + "basic_standard": NetworkFeaturesBasicStandard, + "standard": NetworkFeaturesStandard, + "standard_basic": NetworkFeaturesStandardBasic, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NetworkFeatures(input) + return &out, nil +} + +type ReplicationSchedule string + +const ( + ReplicationScheduleDaily ReplicationSchedule = "daily" + ReplicationScheduleHourly ReplicationSchedule = "hourly" + ReplicationScheduleOneZerominutely ReplicationSchedule = "_10minutely" +) + +func PossibleValuesForReplicationSchedule() []string { + return []string{ + string(ReplicationScheduleDaily), + string(ReplicationScheduleHourly), + string(ReplicationScheduleOneZerominutely), + } +} + +func (s *ReplicationSchedule) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseReplicationSchedule(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicationSchedule(input string) (*ReplicationSchedule, error) { + vals := map[string]ReplicationSchedule{ + "daily": ReplicationScheduleDaily, + "hourly": ReplicationScheduleHourly, + "_10minutely": ReplicationScheduleOneZerominutely, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicationSchedule(input) + return &out, nil +} + +type ReplicationType string + +const ( + ReplicationTypeCrossRegionReplication ReplicationType = "CrossRegionReplication" + ReplicationTypeCrossZoneReplication ReplicationType = "CrossZoneReplication" +) + +func PossibleValuesForReplicationType() []string { + return []string{ + 
string(ReplicationTypeCrossRegionReplication), + string(ReplicationTypeCrossZoneReplication), + } +} + +func (s *ReplicationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseReplicationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicationType(input string) (*ReplicationType, error) { + vals := map[string]ReplicationType{ + "crossregionreplication": ReplicationTypeCrossRegionReplication, + "crosszonereplication": ReplicationTypeCrossZoneReplication, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicationType(input) + return &out, nil +} + +type SecurityStyle string + +const ( + SecurityStyleNtfs SecurityStyle = "ntfs" + SecurityStyleUnix SecurityStyle = "unix" +) + +func PossibleValuesForSecurityStyle() []string { + return []string{ + string(SecurityStyleNtfs), + string(SecurityStyleUnix), + } +} + +func (s *SecurityStyle) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSecurityStyle(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSecurityStyle(input string) (*SecurityStyle, error) { + vals := map[string]SecurityStyle{ + "ntfs": SecurityStyleNtfs, + "unix": SecurityStyleUnix, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SecurityStyle(input) + return &out, nil +} + +type ServiceLevel string + +const ( + ServiceLevelFlexible ServiceLevel = "Flexible" + ServiceLevelPremium ServiceLevel = "Premium" + ServiceLevelStandard 
ServiceLevel = "Standard" + ServiceLevelStandardZRS ServiceLevel = "StandardZRS" + ServiceLevelUltra ServiceLevel = "Ultra" +) + +func PossibleValuesForServiceLevel() []string { + return []string{ + string(ServiceLevelFlexible), + string(ServiceLevelPremium), + string(ServiceLevelStandard), + string(ServiceLevelStandardZRS), + string(ServiceLevelUltra), + } +} + +func (s *ServiceLevel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServiceLevel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServiceLevel(input string) (*ServiceLevel, error) { + vals := map[string]ServiceLevel{ + "flexible": ServiceLevelFlexible, + "premium": ServiceLevelPremium, + "standard": ServiceLevelStandard, + "standardzrs": ServiceLevelStandardZRS, + "ultra": ServiceLevelUltra, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ServiceLevel(input) + return &out, nil +} + +type SmbAccessBasedEnumeration string + +const ( + SmbAccessBasedEnumerationDisabled SmbAccessBasedEnumeration = "Disabled" + SmbAccessBasedEnumerationEnabled SmbAccessBasedEnumeration = "Enabled" +) + +func PossibleValuesForSmbAccessBasedEnumeration() []string { + return []string{ + string(SmbAccessBasedEnumerationDisabled), + string(SmbAccessBasedEnumerationEnabled), + } +} + +func (s *SmbAccessBasedEnumeration) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSmbAccessBasedEnumeration(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSmbAccessBasedEnumeration(input string) (*SmbAccessBasedEnumeration, 
error) { + vals := map[string]SmbAccessBasedEnumeration{ + "disabled": SmbAccessBasedEnumerationDisabled, + "enabled": SmbAccessBasedEnumerationEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SmbAccessBasedEnumeration(input) + return &out, nil +} + +type SmbNonBrowsable string + +const ( + SmbNonBrowsableDisabled SmbNonBrowsable = "Disabled" + SmbNonBrowsableEnabled SmbNonBrowsable = "Enabled" +) + +func PossibleValuesForSmbNonBrowsable() []string { + return []string{ + string(SmbNonBrowsableDisabled), + string(SmbNonBrowsableEnabled), + } +} + +func (s *SmbNonBrowsable) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSmbNonBrowsable(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSmbNonBrowsable(input string) (*SmbNonBrowsable, error) { + vals := map[string]SmbNonBrowsable{ + "disabled": SmbNonBrowsableDisabled, + "enabled": SmbNonBrowsableEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SmbNonBrowsable(input) + return &out, nil +} + +type VolumeStorageToNetworkProximity string + +const ( + VolumeStorageToNetworkProximityAcrossTTwo VolumeStorageToNetworkProximity = "AcrossT2" + VolumeStorageToNetworkProximityDefault VolumeStorageToNetworkProximity = "Default" + VolumeStorageToNetworkProximityTOne VolumeStorageToNetworkProximity = "T1" + VolumeStorageToNetworkProximityTTwo VolumeStorageToNetworkProximity = "T2" +) + +func PossibleValuesForVolumeStorageToNetworkProximity() []string { + return []string{ + string(VolumeStorageToNetworkProximityAcrossTTwo), + string(VolumeStorageToNetworkProximityDefault), + 
string(VolumeStorageToNetworkProximityTOne), + string(VolumeStorageToNetworkProximityTTwo), + } +} + +func (s *VolumeStorageToNetworkProximity) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseVolumeStorageToNetworkProximity(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseVolumeStorageToNetworkProximity(input string) (*VolumeStorageToNetworkProximity, error) { + vals := map[string]VolumeStorageToNetworkProximity{ + "acrosst2": VolumeStorageToNetworkProximityAcrossTTwo, + "default": VolumeStorageToNetworkProximityDefault, + "t1": VolumeStorageToNetworkProximityTOne, + "t2": VolumeStorageToNetworkProximityTTwo, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := VolumeStorageToNetworkProximity(input) + return &out, nil +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/id_volume.go b/resource-manager/netapp/2025-06-01/splitclonevolume/id_volume.go new file mode 100644 index 00000000000..edf6308983f --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/id_volume.go @@ -0,0 +1,148 @@ +package splitclonevolume + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + 
return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + 
resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/id_volume_test.go b/resource-manager/netapp/2025-06-01/splitclonevolume/id_volume_test.go new file mode 100644 index 00000000000..b3e9f690cee --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/id_volume_test.go @@ -0,0 +1,372 @@ +package splitclonevolume + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/method_volumessplitclonefromparent.go b/resource-manager/netapp/2025-06-01/splitclonevolume/method_volumessplitclonefromparent.go new file mode 100644 index 00000000000..b6117507d73 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/method_volumessplitclonefromparent.go @@ -0,0 +1,71 @@ +package splitclonevolume + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesSplitCloneFromParentOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *Volume +} + +// VolumesSplitCloneFromParent ... +func (c SplitCloneVolumeClient) VolumesSplitCloneFromParent(ctx context.Context, id VolumeId) (result VolumesSplitCloneFromParentOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/splitCloneFromParent", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesSplitCloneFromParentThenPoll performs VolumesSplitCloneFromParent then polls until it's completed +func (c SplitCloneVolumeClient) VolumesSplitCloneFromParentThenPoll(ctx context.Context, id VolumeId) error { + result, err := c.VolumesSplitCloneFromParent(ctx, id) + if err != nil { + return fmt.Errorf("performing VolumesSplitCloneFromParent: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesSplitCloneFromParent: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/model_destinationreplication.go b/resource-manager/netapp/2025-06-01/splitclonevolume/model_destinationreplication.go new file mode 100644 index 00000000000..47ada2f21cb --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/model_destinationreplication.go @@ -0,0 +1,11 @@ +package splitclonevolume + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DestinationReplication struct { + Region *string `json:"region,omitempty"` + ReplicationType *ReplicationType `json:"replicationType,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` + Zone *string `json:"zone,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/model_exportpolicyrule.go b/resource-manager/netapp/2025-06-01/splitclonevolume/model_exportpolicyrule.go new file mode 100644 index 00000000000..6d63464695d --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/model_exportpolicyrule.go @@ -0,0 +1,22 @@ +package splitclonevolume + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ExportPolicyRule struct { + AllowedClients *string `json:"allowedClients,omitempty"` + ChownMode *ChownMode `json:"chownMode,omitempty"` + Cifs *bool `json:"cifs,omitempty"` + HasRootAccess *bool `json:"hasRootAccess,omitempty"` + Kerberos5ReadOnly *bool `json:"kerberos5ReadOnly,omitempty"` + Kerberos5ReadWrite *bool `json:"kerberos5ReadWrite,omitempty"` + Kerberos5iReadOnly *bool `json:"kerberos5iReadOnly,omitempty"` + Kerberos5iReadWrite *bool `json:"kerberos5iReadWrite,omitempty"` + Kerberos5pReadOnly *bool `json:"kerberos5pReadOnly,omitempty"` + Kerberos5pReadWrite *bool `json:"kerberos5pReadWrite,omitempty"` + Nfsv3 *bool `json:"nfsv3,omitempty"` + Nfsv41 *bool `json:"nfsv41,omitempty"` + RuleIndex *int64 `json:"ruleIndex,omitempty"` + UnixReadOnly *bool `json:"unixReadOnly,omitempty"` + UnixReadWrite *bool `json:"unixReadWrite,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/model_mounttargetproperties.go b/resource-manager/netapp/2025-06-01/splitclonevolume/model_mounttargetproperties.go new file mode 100644 index 
00000000000..22481855532 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/model_mounttargetproperties.go @@ -0,0 +1,11 @@ +package splitclonevolume + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type MountTargetProperties struct { + FileSystemId string `json:"fileSystemId"` + IPAddress *string `json:"ipAddress,omitempty"` + MountTargetId *string `json:"mountTargetId,omitempty"` + SmbServerFqdn *string `json:"smbServerFqdn,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/model_placementkeyvaluepairs.go b/resource-manager/netapp/2025-06-01/splitclonevolume/model_placementkeyvaluepairs.go new file mode 100644 index 00000000000..135f6ee93db --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/model_placementkeyvaluepairs.go @@ -0,0 +1,9 @@ +package splitclonevolume + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PlacementKeyValuePairs struct { + Key string `json:"key"` + Value string `json:"value"` +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/model_remotepath.go b/resource-manager/netapp/2025-06-01/splitclonevolume/model_remotepath.go new file mode 100644 index 00000000000..c221903bc90 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/model_remotepath.go @@ -0,0 +1,10 @@ +package splitclonevolume + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type RemotePath struct { + ExternalHostName string `json:"externalHostName"` + ServerName string `json:"serverName"` + VolumeName string `json:"volumeName"` +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/model_replicationobject.go b/resource-manager/netapp/2025-06-01/splitclonevolume/model_replicationobject.go new file mode 100644 index 00000000000..d4f9a468025 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/model_replicationobject.go @@ -0,0 +1,14 @@ +package splitclonevolume + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ReplicationObject struct { + DestinationReplications *[]DestinationReplication `json:"destinationReplications,omitempty"` + EndpointType *EndpointType `json:"endpointType,omitempty"` + RemotePath *RemotePath `json:"remotePath,omitempty"` + RemoteVolumeRegion *string `json:"remoteVolumeRegion,omitempty"` + RemoteVolumeResourceId *string `json:"remoteVolumeResourceId,omitempty"` + ReplicationId *string `json:"replicationId,omitempty"` + ReplicationSchedule *ReplicationSchedule `json:"replicationSchedule,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/model_volume.go b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volume.go new file mode 100644 index 00000000000..3a269ccfae1 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volume.go @@ -0,0 +1,21 @@ +package splitclonevolume + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" + "github.com/hashicorp/go-azure-helpers/resourcemanager/zones" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type Volume struct { + Etag *string `json:"etag,omitempty"` + Id *string `json:"id,omitempty"` + Location string `json:"location"` + Name *string `json:"name,omitempty"` + Properties VolumeProperties `json:"properties"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` + Zones *zones.Schema `json:"zones,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumebackupproperties.go b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumebackupproperties.go new file mode 100644 index 00000000000..f43860322f6 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumebackupproperties.go @@ -0,0 +1,10 @@ +package splitclonevolume + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeBackupProperties struct { + BackupPolicyId *string `json:"backupPolicyId,omitempty"` + BackupVaultId *string `json:"backupVaultId,omitempty"` + PolicyEnforced *bool `json:"policyEnforced,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumeproperties.go b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumeproperties.go new file mode 100644 index 00000000000..769adca7358 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumeproperties.go @@ -0,0 +1,65 @@ +package splitclonevolume + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeProperties struct { + AcceptGrowCapacityPoolForShortTermCloneSplit *AcceptGrowCapacityPoolForShortTermCloneSplit `json:"acceptGrowCapacityPoolForShortTermCloneSplit,omitempty"` + ActualThroughputMibps *float64 `json:"actualThroughputMibps,omitempty"` + AvsDataStore *AvsDataStore `json:"avsDataStore,omitempty"` + BackupId *string `json:"backupId,omitempty"` + BaremetalTenantId *string `json:"baremetalTenantId,omitempty"` + CapacityPoolResourceId *string `json:"capacityPoolResourceId,omitempty"` + CloneProgress *int64 `json:"cloneProgress,omitempty"` + CoolAccess *bool `json:"coolAccess,omitempty"` + CoolAccessRetrievalPolicy *CoolAccessRetrievalPolicy `json:"coolAccessRetrievalPolicy,omitempty"` + CoolAccessTieringPolicy *CoolAccessTieringPolicy `json:"coolAccessTieringPolicy,omitempty"` + CoolnessPeriod *int64 `json:"coolnessPeriod,omitempty"` + CreationToken string `json:"creationToken"` + DataProtection *VolumePropertiesDataProtection `json:"dataProtection,omitempty"` + DataStoreResourceId *[]string `json:"dataStoreResourceId,omitempty"` + DefaultGroupQuotaInKiBs *int64 `json:"defaultGroupQuotaInKiBs,omitempty"` + DefaultUserQuotaInKiBs *int64 `json:"defaultUserQuotaInKiBs,omitempty"` + DeleteBaseSnapshot *bool `json:"deleteBaseSnapshot,omitempty"` + EffectiveNetworkFeatures *NetworkFeatures `json:"effectiveNetworkFeatures,omitempty"` + EnableSubvolumes *EnableSubvolumes `json:"enableSubvolumes,omitempty"` + Encrypted *bool `json:"encrypted,omitempty"` + EncryptionKeySource *EncryptionKeySource `json:"encryptionKeySource,omitempty"` + ExportPolicy *VolumePropertiesExportPolicy `json:"exportPolicy,omitempty"` + FileAccessLogs *FileAccessLogs `json:"fileAccessLogs,omitempty"` + FileSystemId *string `json:"fileSystemId,omitempty"` + InheritedSizeInBytes *int64 `json:"inheritedSizeInBytes,omitempty"` + IsDefaultQuotaEnabled *bool `json:"isDefaultQuotaEnabled,omitempty"` + IsLargeVolume *bool `json:"isLargeVolume,omitempty"` + IsRestoring *bool 
`json:"isRestoring,omitempty"` + KerberosEnabled *bool `json:"kerberosEnabled,omitempty"` + KeyVaultPrivateEndpointResourceId *string `json:"keyVaultPrivateEndpointResourceId,omitempty"` + LdapEnabled *bool `json:"ldapEnabled,omitempty"` + MaximumNumberOfFiles *int64 `json:"maximumNumberOfFiles,omitempty"` + MountTargets *[]MountTargetProperties `json:"mountTargets,omitempty"` + NetworkFeatures *NetworkFeatures `json:"networkFeatures,omitempty"` + NetworkSiblingSetId *string `json:"networkSiblingSetId,omitempty"` + OriginatingResourceId *string `json:"originatingResourceId,omitempty"` + PlacementRules *[]PlacementKeyValuePairs `json:"placementRules,omitempty"` + ProtocolTypes *[]string `json:"protocolTypes,omitempty"` + ProvisionedAvailabilityZone *string `json:"provisionedAvailabilityZone,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + ProximityPlacementGroup *string `json:"proximityPlacementGroup,omitempty"` + SecurityStyle *SecurityStyle `json:"securityStyle,omitempty"` + ServiceLevel *ServiceLevel `json:"serviceLevel,omitempty"` + SmbAccessBasedEnumeration *SmbAccessBasedEnumeration `json:"smbAccessBasedEnumeration,omitempty"` + SmbContinuouslyAvailable *bool `json:"smbContinuouslyAvailable,omitempty"` + SmbEncryption *bool `json:"smbEncryption,omitempty"` + SmbNonBrowsable *SmbNonBrowsable `json:"smbNonBrowsable,omitempty"` + SnapshotDirectoryVisible *bool `json:"snapshotDirectoryVisible,omitempty"` + SnapshotId *string `json:"snapshotId,omitempty"` + StorageToNetworkProximity *VolumeStorageToNetworkProximity `json:"storageToNetworkProximity,omitempty"` + SubnetId string `json:"subnetId"` + T2Network *string `json:"t2Network,omitempty"` + ThroughputMibps *float64 `json:"throughputMibps,omitempty"` + UnixPermissions *string `json:"unixPermissions,omitempty"` + UsageThreshold int64 `json:"usageThreshold"` + VolumeGroupName *string `json:"volumeGroupName,omitempty"` + VolumeSpecName *string `json:"volumeSpecName,omitempty"` + 
VolumeType *string `json:"volumeType,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumepropertiesdataprotection.go b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumepropertiesdataprotection.go new file mode 100644 index 00000000000..30f027b0c60 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumepropertiesdataprotection.go @@ -0,0 +1,11 @@ +package splitclonevolume + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumePropertiesDataProtection struct { + Backup *VolumeBackupProperties `json:"backup,omitempty"` + Replication *ReplicationObject `json:"replication,omitempty"` + Snapshot *VolumeSnapshotProperties `json:"snapshot,omitempty"` + VolumeRelocation *VolumeRelocationProperties `json:"volumeRelocation,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumepropertiesexportpolicy.go b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumepropertiesexportpolicy.go new file mode 100644 index 00000000000..ecd22da2628 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumepropertiesexportpolicy.go @@ -0,0 +1,8 @@ +package splitclonevolume + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumePropertiesExportPolicy struct { + Rules *[]ExportPolicyRule `json:"rules,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumerelocationproperties.go b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumerelocationproperties.go new file mode 100644 index 00000000000..fc5d598c416 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumerelocationproperties.go @@ -0,0 +1,9 @@ +package splitclonevolume + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeRelocationProperties struct { + ReadyToBeFinalized *bool `json:"readyToBeFinalized,omitempty"` + RelocationRequested *bool `json:"relocationRequested,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumesnapshotproperties.go b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumesnapshotproperties.go new file mode 100644 index 00000000000..c891b2728dd --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/model_volumesnapshotproperties.go @@ -0,0 +1,8 @@ +package splitclonevolume + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeSnapshotProperties struct { + SnapshotPolicyId *string `json:"snapshotPolicyId,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/splitclonevolume/version.go b/resource-manager/netapp/2025-06-01/splitclonevolume/version.go new file mode 100644 index 00000000000..08abe8dcdd7 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/splitclonevolume/version.go @@ -0,0 +1,10 @@ +package splitclonevolume + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/splitclonevolume/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/README.md b/resource-manager/netapp/2025-06-01/subvolumes/README.md new file mode 100644 index 00000000000..200adc7b666 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/README.md @@ -0,0 +1,111 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/subvolumes` Documentation + +The `subvolumes` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/subvolumes" +``` + + +### Client Initialization + +```go +client := subvolumes.NewSubVolumesClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `SubVolumesClient.Create` + +```go +ctx := context.TODO() +id := subvolumes.NewSubVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "subVolumeName") + +payload := subvolumes.SubvolumeInfo{ + // ... 
+} + + +if err := client.CreateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `SubVolumesClient.Delete` + +```go +ctx := context.TODO() +id := subvolumes.NewSubVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "subVolumeName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `SubVolumesClient.Get` + +```go +ctx := context.TODO() +id := subvolumes.NewSubVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "subVolumeName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `SubVolumesClient.GetMetadata` + +```go +ctx := context.TODO() +id := subvolumes.NewSubVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "subVolumeName") + +if err := client.GetMetadataThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `SubVolumesClient.ListByVolume` + +```go +ctx := context.TODO() +id := subvolumes.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +// alternatively `client.ListByVolume(ctx, id)` can be used to do batched pagination +items, err := client.ListByVolumeComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `SubVolumesClient.Update` + +```go +ctx := context.TODO() +id := subvolumes.NewSubVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "subVolumeName") + +payload := subvolumes.SubvolumePatchRequest{ + 
// ... +} + + +if err := client.UpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/subvolumes/client.go b/resource-manager/netapp/2025-06-01/subvolumes/client.go new file mode 100644 index 00000000000..051d1e4b2ba --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/client.go @@ -0,0 +1,26 @@ +package subvolumes + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SubVolumesClient struct { + Client *resourcemanager.Client +} + +func NewSubVolumesClientWithBaseURI(sdkApi sdkEnv.Api) (*SubVolumesClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "subvolumes", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating SubVolumesClient: %+v", err) + } + + return &SubVolumesClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/id_subvolume.go b/resource-manager/netapp/2025-06-01/subvolumes/id_subvolume.go new file mode 100644 index 00000000000..d9ceec6f9e6 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/id_subvolume.go @@ -0,0 +1,157 @@ +package subvolumes + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&SubVolumeId{}) +} + +var _ resourceids.ResourceId = &SubVolumeId{} + +// SubVolumeId is a struct representing the Resource ID for a Sub Volume +type SubVolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string + SubVolumeName string +} + +// NewSubVolumeID returns a new SubVolumeId struct +func NewSubVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string, subVolumeName string) SubVolumeId { + return SubVolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + SubVolumeName: subVolumeName, + } +} + +// ParseSubVolumeID parses 'input' into a SubVolumeId +func ParseSubVolumeID(input string) (*SubVolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&SubVolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SubVolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseSubVolumeIDInsensitively parses 'input' case-insensitively into a SubVolumeId +// note: this method should only be used for API response data and not user input +func ParseSubVolumeIDInsensitively(input string) (*SubVolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&SubVolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := SubVolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *SubVolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + if id.SubVolumeName, ok = input.Parsed["subVolumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subVolumeName", input) + } + + return nil +} + +// ValidateSubVolumeID checks that 'input' can be parsed as a Sub Volume ID +func ValidateSubVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseSubVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Sub Volume ID +func (id SubVolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s/subVolumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName, id.SubVolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Sub Volume ID +func (id SubVolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + 
resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + resourceids.StaticSegment("staticSubVolumes", "subVolumes", "subVolumes"), + resourceids.UserSpecifiedSegment("subVolumeName", "subVolumeName"), + } +} + +// String returns a human-readable description of this Sub Volume ID +func (id SubVolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + fmt.Sprintf("Sub Volume Name: %q", id.SubVolumeName), + } + return fmt.Sprintf("Sub Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/id_subvolume_test.go b/resource-manager/netapp/2025-06-01/subvolumes/id_subvolume_test.go new file mode 100644 index 00000000000..c427629eb5d --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/id_subvolume_test.go @@ -0,0 +1,417 @@ +package subvolumes + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &SubVolumeId{} + +func TestNewSubVolumeID(t *testing.T) { + id := NewSubVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "subVolumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } + + if id.SubVolumeName != "subVolumeName" { + t.Fatalf("Expected %q but got %q for Segment 'SubVolumeName'", id.SubVolumeName, "subVolumeName") + } +} + +func TestFormatSubVolumeID(t *testing.T) { + actual := NewSubVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "subVolumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/subVolumes/subVolumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseSubVolumeID(t *testing.T) { + testData := 
[]struct { + Input string + Error bool + Expected *SubVolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + 
{ + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/subVolumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/subVolumes/subVolumeName", + Expected: &SubVolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + SubVolumeName: "subVolumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/subVolumes/subVolumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSubVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, 
actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + if actual.SubVolumeName != v.Expected.SubVolumeName { + t.Fatalf("Expected %q but got %q for SubVolumeName", v.Expected.SubVolumeName, actual.SubVolumeName) + } + + } +} + +func TestParseSubVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *SubVolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/subVolumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/sUbVoLuMeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/subVolumes/subVolumeName", + Expected: &SubVolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + SubVolumeName: "subVolumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/subVolumes/subVolumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/sUbVoLuMeS/sUbVoLuMeNaMe", + Expected: &SubVolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + SubVolumeName: "sUbVoLuMeNaMe", + }, + }, + { + // Invalid (Valid Uri 
with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/sUbVoLuMeS/sUbVoLuMeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseSubVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + if actual.SubVolumeName != v.Expected.SubVolumeName { + t.Fatalf("Expected %q but got %q for SubVolumeName", v.Expected.SubVolumeName, actual.SubVolumeName) + } + + } +} + +func TestSegmentsForSubVolumeId(t *testing.T) { + segments := SubVolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("SubVolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != 
len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/id_volume.go b/resource-manager/netapp/2025-06-01/subvolumes/id_volume.go new file mode 100644 index 00000000000..1b621f47327 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/id_volume.go @@ -0,0 +1,148 @@ +package subvolumes + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively 
into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, 
id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/id_volume_test.go b/resource-manager/netapp/2025-06-01/subvolumes/id_volume_test.go new file mode 100644 index 00000000000..31c7ff8fe02 --- /dev/null +++ 
b/resource-manager/netapp/2025-06-01/subvolumes/id_volume_test.go @@ -0,0 +1,372 @@ +package subvolumes + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { 
+ testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: 
true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t 
*testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/method_create.go 
b/resource-manager/netapp/2025-06-01/subvolumes/method_create.go new file mode 100644 index 00000000000..bbfe4a236ba --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/method_create.go @@ -0,0 +1,76 @@ +package subvolumes + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *SubvolumeInfo +} + +// Create ... +func (c SubVolumesClient) Create(ctx context.Context, id SubVolumeId, input SubvolumeInfo) (result CreateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateThenPoll performs Create then polls until it's completed +func (c SubVolumesClient) CreateThenPoll(ctx context.Context, id SubVolumeId, input SubvolumeInfo) error { + result, err := c.Create(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Create: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return 
fmt.Errorf("polling after Create: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/method_delete.go b/resource-manager/netapp/2025-06-01/subvolumes/method_delete.go new file mode 100644 index 00000000000..2f19ed22f0c --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/method_delete.go @@ -0,0 +1,71 @@ +package subvolumes + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c SubVolumesClient) Delete(ctx context.Context, id SubVolumeId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c SubVolumesClient) DeleteThenPoll(ctx context.Context, id SubVolumeId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != 
nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/method_get.go b/resource-manager/netapp/2025-06-01/subvolumes/method_get.go new file mode 100644 index 00000000000..20f1ee49120 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/method_get.go @@ -0,0 +1,53 @@ +package subvolumes + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *SubvolumeInfo +} + +// Get ... +func (c SubVolumesClient) Get(ctx context.Context, id SubVolumeId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model SubvolumeInfo + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/method_getmetadata.go b/resource-manager/netapp/2025-06-01/subvolumes/method_getmetadata.go new file mode 100644 index 00000000000..08001acad27 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/method_getmetadata.go @@ -0,0 +1,71 @@ +package subvolumes + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + 
"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type GetMetadataOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *SubvolumeModel +} + +// GetMetadata ... +func (c SubVolumesClient) GetMetadata(ctx context.Context, id SubVolumeId) (result GetMetadataOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/getMetadata", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// GetMetadataThenPoll performs GetMetadata then polls until it's completed +func (c SubVolumesClient) GetMetadataThenPoll(ctx context.Context, id SubVolumeId) error { + result, err := c.GetMetadata(ctx, id) + if err != nil { + return fmt.Errorf("performing GetMetadata: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after GetMetadata: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/method_listbyvolume.go b/resource-manager/netapp/2025-06-01/subvolumes/method_listbyvolume.go new file mode 100644 index 00000000000..e6022de2ecf --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/method_listbyvolume.go @@ -0,0 +1,105 @@ +package subvolumes + +import ( + "context" + "fmt" + "net/http" 
+ + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListByVolumeOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]SubvolumeInfo +} + +type ListByVolumeCompleteResult struct { + LatestHttpResponse *http.Response + Items []SubvolumeInfo +} + +type ListByVolumeCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListByVolumeCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// ListByVolume ... +func (c SubVolumesClient) ListByVolume(ctx context.Context, id VolumeId) (result ListByVolumeOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListByVolumeCustomPager{}, + Path: fmt.Sprintf("%s/subVolumes", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]SubvolumeInfo `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListByVolumeComplete retrieves all the results into a single object +func (c SubVolumesClient) ListByVolumeComplete(ctx context.Context, id VolumeId) (ListByVolumeCompleteResult, error) { + return c.ListByVolumeCompleteMatchingPredicate(ctx, id, SubvolumeInfoOperationPredicate{}) +} + +// ListByVolumeCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c SubVolumesClient) 
ListByVolumeCompleteMatchingPredicate(ctx context.Context, id VolumeId, predicate SubvolumeInfoOperationPredicate) (result ListByVolumeCompleteResult, err error) { + items := make([]SubvolumeInfo, 0) + + resp, err := c.ListByVolume(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListByVolumeCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/method_update.go b/resource-manager/netapp/2025-06-01/subvolumes/method_update.go new file mode 100644 index 00000000000..294ead80256 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/method_update.go @@ -0,0 +1,75 @@ +package subvolumes + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *SubvolumeInfo +} + +// Update ... 
+func (c SubVolumesClient) Update(ctx context.Context, id SubVolumeId, input SubvolumePatchRequest) (result UpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UpdateThenPoll performs Update then polls until it's completed +func (c SubVolumesClient) UpdateThenPoll(ctx context.Context, id SubVolumeId, input SubvolumePatchRequest) error { + result, err := c.Update(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Update: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Update: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumeinfo.go b/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumeinfo.go new file mode 100644 index 00000000000..3adbcfc6002 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumeinfo.go @@ -0,0 +1,16 @@ +package subvolumes + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SubvolumeInfo struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *SubvolumeProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumemodel.go b/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumemodel.go new file mode 100644 index 00000000000..d15828e2987 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumemodel.go @@ -0,0 +1,11 @@ +package subvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SubvolumeModel struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *SubvolumeModelProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumemodelproperties.go b/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumemodelproperties.go new file mode 100644 index 00000000000..26911c386fc --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumemodelproperties.go @@ -0,0 +1,71 @@ +package subvolumes + +import ( + "time" + + "github.com/hashicorp/go-azure-helpers/lang/dates" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type SubvolumeModelProperties struct { + AccessedTimeStamp *string `json:"accessedTimeStamp,omitempty"` + BytesUsed *int64 `json:"bytesUsed,omitempty"` + ChangedTimeStamp *string `json:"changedTimeStamp,omitempty"` + CreationTimeStamp *string `json:"creationTimeStamp,omitempty"` + ModifiedTimeStamp *string `json:"modifiedTimeStamp,omitempty"` + ParentPath *string `json:"parentPath,omitempty"` + Path *string `json:"path,omitempty"` + Permissions *string `json:"permissions,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + Size *int64 `json:"size,omitempty"` +} + +func (o *SubvolumeModelProperties) GetAccessedTimeStampAsTime() (*time.Time, error) { + if o.AccessedTimeStamp == nil { + return nil, nil + } + return dates.ParseAsFormat(o.AccessedTimeStamp, "2006-01-02T15:04:05Z07:00") +} + +func (o *SubvolumeModelProperties) SetAccessedTimeStampAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.AccessedTimeStamp = &formatted +} + +func (o *SubvolumeModelProperties) GetChangedTimeStampAsTime() (*time.Time, error) { + if o.ChangedTimeStamp == nil { + return nil, nil + } + return dates.ParseAsFormat(o.ChangedTimeStamp, "2006-01-02T15:04:05Z07:00") +} + +func (o *SubvolumeModelProperties) SetChangedTimeStampAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.ChangedTimeStamp = &formatted +} + +func (o *SubvolumeModelProperties) GetCreationTimeStampAsTime() (*time.Time, error) { + if o.CreationTimeStamp == nil { + return nil, nil + } + return dates.ParseAsFormat(o.CreationTimeStamp, "2006-01-02T15:04:05Z07:00") +} + +func (o *SubvolumeModelProperties) SetCreationTimeStampAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.CreationTimeStamp = &formatted +} + +func (o *SubvolumeModelProperties) GetModifiedTimeStampAsTime() (*time.Time, error) { + if o.ModifiedTimeStamp == nil { + return nil, nil + } + return 
dates.ParseAsFormat(o.ModifiedTimeStamp, "2006-01-02T15:04:05Z07:00") +} + +func (o *SubvolumeModelProperties) SetModifiedTimeStampAsTime(input time.Time) { + formatted := input.Format("2006-01-02T15:04:05Z07:00") + o.ModifiedTimeStamp = &formatted +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumepatchparams.go b/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumepatchparams.go new file mode 100644 index 00000000000..02ea94ff0ac --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumepatchparams.go @@ -0,0 +1,9 @@ +package subvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SubvolumePatchParams struct { + Path *string `json:"path,omitempty"` + Size *int64 `json:"size,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumepatchrequest.go b/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumepatchrequest.go new file mode 100644 index 00000000000..0394a55073a --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumepatchrequest.go @@ -0,0 +1,8 @@ +package subvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SubvolumePatchRequest struct { + Properties *SubvolumePatchParams `json:"properties,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumeproperties.go b/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumeproperties.go new file mode 100644 index 00000000000..899e995183a --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/model_subvolumeproperties.go @@ -0,0 +1,11 @@ +package subvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type SubvolumeProperties struct { + ParentPath *string `json:"parentPath,omitempty"` + Path *string `json:"path,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + Size *int64 `json:"size,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/predicates.go b/resource-manager/netapp/2025-06-01/subvolumes/predicates.go new file mode 100644 index 00000000000..af649a6bd08 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/predicates.go @@ -0,0 +1,27 @@ +package subvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SubvolumeInfoOperationPredicate struct { + Id *string + Name *string + Type *string +} + +func (p SubvolumeInfoOperationPredicate) Matches(input SubvolumeInfo) bool { + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/netapp/2025-06-01/subvolumes/version.go b/resource-manager/netapp/2025-06-01/subvolumes/version.go new file mode 100644 index 00000000000..7b343703539 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/subvolumes/version.go @@ -0,0 +1,10 @@ +package subvolumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/subvolumes/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/README.md b/resource-manager/netapp/2025-06-01/volumegroups/README.md new file mode 100644 index 00000000000..a1fc00b13e5 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/README.md @@ -0,0 +1,81 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumegroups` Documentation + +The `volumegroups` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumegroups" +``` + + +### Client Initialization + +```go +client := volumegroups.NewVolumeGroupsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `VolumeGroupsClient.Create` + +```go +ctx := context.TODO() +id := volumegroups.NewVolumeGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "volumeGroupName") + +payload := volumegroups.VolumeGroupDetails{ + // ... 
+} + + +if err := client.CreateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumeGroupsClient.Delete` + +```go +ctx := context.TODO() +id := volumegroups.NewVolumeGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "volumeGroupName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumeGroupsClient.Get` + +```go +ctx := context.TODO() +id := volumegroups.NewVolumeGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "volumeGroupName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `VolumeGroupsClient.ListByNetAppAccount` + +```go +ctx := context.TODO() +id := volumegroups.NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + +read, err := client.ListByNetAppAccount(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/netapp/2025-06-01/volumegroups/client.go b/resource-manager/netapp/2025-06-01/volumegroups/client.go new file mode 100644 index 00000000000..580e5ac9fb8 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/client.go @@ -0,0 +1,26 @@ +package volumegroups + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeGroupsClient struct { + Client *resourcemanager.Client +} + +func NewVolumeGroupsClientWithBaseURI(sdkApi sdkEnv.Api) (*VolumeGroupsClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "volumegroups", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating VolumeGroupsClient: %+v", err) + } + + return &VolumeGroupsClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/constants.go b/resource-manager/netapp/2025-06-01/volumegroups/constants.go new file mode 100644 index 00000000000..9d09e711e7e --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/constants.go @@ -0,0 +1,775 @@ +package volumegroups + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AcceptGrowCapacityPoolForShortTermCloneSplit string + +const ( + AcceptGrowCapacityPoolForShortTermCloneSplitAccepted AcceptGrowCapacityPoolForShortTermCloneSplit = "Accepted" + AcceptGrowCapacityPoolForShortTermCloneSplitDeclined AcceptGrowCapacityPoolForShortTermCloneSplit = "Declined" +) + +func PossibleValuesForAcceptGrowCapacityPoolForShortTermCloneSplit() []string { + return []string{ + string(AcceptGrowCapacityPoolForShortTermCloneSplitAccepted), + string(AcceptGrowCapacityPoolForShortTermCloneSplitDeclined), + } +} + +func (s *AcceptGrowCapacityPoolForShortTermCloneSplit) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAcceptGrowCapacityPoolForShortTermCloneSplit(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAcceptGrowCapacityPoolForShortTermCloneSplit(input string) (*AcceptGrowCapacityPoolForShortTermCloneSplit, error) { + vals 
:= map[string]AcceptGrowCapacityPoolForShortTermCloneSplit{ + "accepted": AcceptGrowCapacityPoolForShortTermCloneSplitAccepted, + "declined": AcceptGrowCapacityPoolForShortTermCloneSplitDeclined, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AcceptGrowCapacityPoolForShortTermCloneSplit(input) + return &out, nil +} + +type ApplicationType string + +const ( + ApplicationTypeORACLE ApplicationType = "ORACLE" + ApplicationTypeSAPNegativeHANA ApplicationType = "SAP-HANA" +) + +func PossibleValuesForApplicationType() []string { + return []string{ + string(ApplicationTypeORACLE), + string(ApplicationTypeSAPNegativeHANA), + } +} + +func (s *ApplicationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseApplicationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseApplicationType(input string) (*ApplicationType, error) { + vals := map[string]ApplicationType{ + "oracle": ApplicationTypeORACLE, + "sap-hana": ApplicationTypeSAPNegativeHANA, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ApplicationType(input) + return &out, nil +} + +type AvsDataStore string + +const ( + AvsDataStoreDisabled AvsDataStore = "Disabled" + AvsDataStoreEnabled AvsDataStore = "Enabled" +) + +func PossibleValuesForAvsDataStore() []string { + return []string{ + string(AvsDataStoreDisabled), + string(AvsDataStoreEnabled), + } +} + +func (s *AvsDataStore) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAvsDataStore(decoded) + if err != nil { + return 
fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAvsDataStore(input string) (*AvsDataStore, error) { + vals := map[string]AvsDataStore{ + "disabled": AvsDataStoreDisabled, + "enabled": AvsDataStoreEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AvsDataStore(input) + return &out, nil +} + +type ChownMode string + +const ( + ChownModeRestricted ChownMode = "Restricted" + ChownModeUnrestricted ChownMode = "Unrestricted" +) + +func PossibleValuesForChownMode() []string { + return []string{ + string(ChownModeRestricted), + string(ChownModeUnrestricted), + } +} + +func (s *ChownMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseChownMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseChownMode(input string) (*ChownMode, error) { + vals := map[string]ChownMode{ + "restricted": ChownModeRestricted, + "unrestricted": ChownModeUnrestricted, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ChownMode(input) + return &out, nil +} + +type CoolAccessRetrievalPolicy string + +const ( + CoolAccessRetrievalPolicyDefault CoolAccessRetrievalPolicy = "Default" + CoolAccessRetrievalPolicyNever CoolAccessRetrievalPolicy = "Never" + CoolAccessRetrievalPolicyOnRead CoolAccessRetrievalPolicy = "OnRead" +) + +func PossibleValuesForCoolAccessRetrievalPolicy() []string { + return []string{ + string(CoolAccessRetrievalPolicyDefault), + string(CoolAccessRetrievalPolicyNever), + string(CoolAccessRetrievalPolicyOnRead), + } +} + +func (s *CoolAccessRetrievalPolicy) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := 
json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCoolAccessRetrievalPolicy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCoolAccessRetrievalPolicy(input string) (*CoolAccessRetrievalPolicy, error) { + vals := map[string]CoolAccessRetrievalPolicy{ + "default": CoolAccessRetrievalPolicyDefault, + "never": CoolAccessRetrievalPolicyNever, + "onread": CoolAccessRetrievalPolicyOnRead, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CoolAccessRetrievalPolicy(input) + return &out, nil +} + +type CoolAccessTieringPolicy string + +const ( + CoolAccessTieringPolicyAuto CoolAccessTieringPolicy = "Auto" + CoolAccessTieringPolicySnapshotOnly CoolAccessTieringPolicy = "SnapshotOnly" +) + +func PossibleValuesForCoolAccessTieringPolicy() []string { + return []string{ + string(CoolAccessTieringPolicyAuto), + string(CoolAccessTieringPolicySnapshotOnly), + } +} + +func (s *CoolAccessTieringPolicy) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCoolAccessTieringPolicy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCoolAccessTieringPolicy(input string) (*CoolAccessTieringPolicy, error) { + vals := map[string]CoolAccessTieringPolicy{ + "auto": CoolAccessTieringPolicyAuto, + "snapshotonly": CoolAccessTieringPolicySnapshotOnly, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CoolAccessTieringPolicy(input) + return &out, nil +} + +type EnableSubvolumes string + +const ( + EnableSubvolumesDisabled EnableSubvolumes = "Disabled" + 
EnableSubvolumesEnabled EnableSubvolumes = "Enabled" +) + +func PossibleValuesForEnableSubvolumes() []string { + return []string{ + string(EnableSubvolumesDisabled), + string(EnableSubvolumesEnabled), + } +} + +func (s *EnableSubvolumes) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseEnableSubvolumes(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseEnableSubvolumes(input string) (*EnableSubvolumes, error) { + vals := map[string]EnableSubvolumes{ + "disabled": EnableSubvolumesDisabled, + "enabled": EnableSubvolumesEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := EnableSubvolumes(input) + return &out, nil +} + +type EncryptionKeySource string + +const ( + EncryptionKeySourceMicrosoftPointKeyVault EncryptionKeySource = "Microsoft.KeyVault" + EncryptionKeySourceMicrosoftPointNetApp EncryptionKeySource = "Microsoft.NetApp" +) + +func PossibleValuesForEncryptionKeySource() []string { + return []string{ + string(EncryptionKeySourceMicrosoftPointKeyVault), + string(EncryptionKeySourceMicrosoftPointNetApp), + } +} + +func (s *EncryptionKeySource) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseEncryptionKeySource(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseEncryptionKeySource(input string) (*EncryptionKeySource, error) { + vals := map[string]EncryptionKeySource{ + "microsoft.keyvault": EncryptionKeySourceMicrosoftPointKeyVault, + "microsoft.netapp": EncryptionKeySourceMicrosoftPointNetApp, + } + if v, ok := vals[strings.ToLower(input)]; ok { + 
return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := EncryptionKeySource(input) + return &out, nil +} + +type EndpointType string + +const ( + EndpointTypeDst EndpointType = "dst" + EndpointTypeSrc EndpointType = "src" +) + +func PossibleValuesForEndpointType() []string { + return []string{ + string(EndpointTypeDst), + string(EndpointTypeSrc), + } +} + +func (s *EndpointType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseEndpointType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseEndpointType(input string) (*EndpointType, error) { + vals := map[string]EndpointType{ + "dst": EndpointTypeDst, + "src": EndpointTypeSrc, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := EndpointType(input) + return &out, nil +} + +type FileAccessLogs string + +const ( + FileAccessLogsDisabled FileAccessLogs = "Disabled" + FileAccessLogsEnabled FileAccessLogs = "Enabled" +) + +func PossibleValuesForFileAccessLogs() []string { + return []string{ + string(FileAccessLogsDisabled), + string(FileAccessLogsEnabled), + } +} + +func (s *FileAccessLogs) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseFileAccessLogs(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseFileAccessLogs(input string) (*FileAccessLogs, error) { + vals := map[string]FileAccessLogs{ + "disabled": FileAccessLogsDisabled, + "enabled": FileAccessLogsEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an 
undefined value and best-effort it + out := FileAccessLogs(input) + return &out, nil +} + +type NetworkFeatures string + +const ( + NetworkFeaturesBasic NetworkFeatures = "Basic" + NetworkFeaturesBasicStandard NetworkFeatures = "Basic_Standard" + NetworkFeaturesStandard NetworkFeatures = "Standard" + NetworkFeaturesStandardBasic NetworkFeatures = "Standard_Basic" +) + +func PossibleValuesForNetworkFeatures() []string { + return []string{ + string(NetworkFeaturesBasic), + string(NetworkFeaturesBasicStandard), + string(NetworkFeaturesStandard), + string(NetworkFeaturesStandardBasic), + } +} + +func (s *NetworkFeatures) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseNetworkFeatures(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseNetworkFeatures(input string) (*NetworkFeatures, error) { + vals := map[string]NetworkFeatures{ + "basic": NetworkFeaturesBasic, + "basic_standard": NetworkFeaturesBasicStandard, + "standard": NetworkFeaturesStandard, + "standard_basic": NetworkFeaturesStandardBasic, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := NetworkFeatures(input) + return &out, nil +} + +type ReplicationSchedule string + +const ( + ReplicationScheduleDaily ReplicationSchedule = "daily" + ReplicationScheduleHourly ReplicationSchedule = "hourly" + ReplicationScheduleOneZerominutely ReplicationSchedule = "_10minutely" +) + +func PossibleValuesForReplicationSchedule() []string { + return []string{ + string(ReplicationScheduleDaily), + string(ReplicationScheduleHourly), + string(ReplicationScheduleOneZerominutely), + } +} + +func (s *ReplicationSchedule) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != 
nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseReplicationSchedule(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicationSchedule(input string) (*ReplicationSchedule, error) { + vals := map[string]ReplicationSchedule{ + "daily": ReplicationScheduleDaily, + "hourly": ReplicationScheduleHourly, + "_10minutely": ReplicationScheduleOneZerominutely, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicationSchedule(input) + return &out, nil +} + +type ReplicationType string + +const ( + ReplicationTypeCrossRegionReplication ReplicationType = "CrossRegionReplication" + ReplicationTypeCrossZoneReplication ReplicationType = "CrossZoneReplication" +) + +func PossibleValuesForReplicationType() []string { + return []string{ + string(ReplicationTypeCrossRegionReplication), + string(ReplicationTypeCrossZoneReplication), + } +} + +func (s *ReplicationType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseReplicationType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicationType(input string) (*ReplicationType, error) { + vals := map[string]ReplicationType{ + "crossregionreplication": ReplicationTypeCrossRegionReplication, + "crosszonereplication": ReplicationTypeCrossZoneReplication, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicationType(input) + return &out, nil +} + +type SecurityStyle string + +const ( + SecurityStyleNtfs SecurityStyle = "ntfs" + SecurityStyleUnix SecurityStyle = "unix" +) + +func PossibleValuesForSecurityStyle() []string { + 
return []string{ + string(SecurityStyleNtfs), + string(SecurityStyleUnix), + } +} + +func (s *SecurityStyle) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSecurityStyle(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSecurityStyle(input string) (*SecurityStyle, error) { + vals := map[string]SecurityStyle{ + "ntfs": SecurityStyleNtfs, + "unix": SecurityStyleUnix, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SecurityStyle(input) + return &out, nil +} + +type ServiceLevel string + +const ( + ServiceLevelFlexible ServiceLevel = "Flexible" + ServiceLevelPremium ServiceLevel = "Premium" + ServiceLevelStandard ServiceLevel = "Standard" + ServiceLevelStandardZRS ServiceLevel = "StandardZRS" + ServiceLevelUltra ServiceLevel = "Ultra" +) + +func PossibleValuesForServiceLevel() []string { + return []string{ + string(ServiceLevelFlexible), + string(ServiceLevelPremium), + string(ServiceLevelStandard), + string(ServiceLevelStandardZRS), + string(ServiceLevelUltra), + } +} + +func (s *ServiceLevel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseServiceLevel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseServiceLevel(input string) (*ServiceLevel, error) { + vals := map[string]ServiceLevel{ + "flexible": ServiceLevelFlexible, + "premium": ServiceLevelPremium, + "standard": ServiceLevelStandard, + "standardzrs": ServiceLevelStandardZRS, + "ultra": ServiceLevelUltra, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise 
presume it's an undefined value and best-effort it + out := ServiceLevel(input) + return &out, nil +} + +type SmbAccessBasedEnumeration string + +const ( + SmbAccessBasedEnumerationDisabled SmbAccessBasedEnumeration = "Disabled" + SmbAccessBasedEnumerationEnabled SmbAccessBasedEnumeration = "Enabled" +) + +func PossibleValuesForSmbAccessBasedEnumeration() []string { + return []string{ + string(SmbAccessBasedEnumerationDisabled), + string(SmbAccessBasedEnumerationEnabled), + } +} + +func (s *SmbAccessBasedEnumeration) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSmbAccessBasedEnumeration(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSmbAccessBasedEnumeration(input string) (*SmbAccessBasedEnumeration, error) { + vals := map[string]SmbAccessBasedEnumeration{ + "disabled": SmbAccessBasedEnumerationDisabled, + "enabled": SmbAccessBasedEnumerationEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SmbAccessBasedEnumeration(input) + return &out, nil +} + +type SmbNonBrowsable string + +const ( + SmbNonBrowsableDisabled SmbNonBrowsable = "Disabled" + SmbNonBrowsableEnabled SmbNonBrowsable = "Enabled" +) + +func PossibleValuesForSmbNonBrowsable() []string { + return []string{ + string(SmbNonBrowsableDisabled), + string(SmbNonBrowsableEnabled), + } +} + +func (s *SmbNonBrowsable) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSmbNonBrowsable(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSmbNonBrowsable(input string) (*SmbNonBrowsable, error) { 
+ vals := map[string]SmbNonBrowsable{ + "disabled": SmbNonBrowsableDisabled, + "enabled": SmbNonBrowsableEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SmbNonBrowsable(input) + return &out, nil +} + +type VolumeStorageToNetworkProximity string + +const ( + VolumeStorageToNetworkProximityAcrossTTwo VolumeStorageToNetworkProximity = "AcrossT2" + VolumeStorageToNetworkProximityDefault VolumeStorageToNetworkProximity = "Default" + VolumeStorageToNetworkProximityTOne VolumeStorageToNetworkProximity = "T1" + VolumeStorageToNetworkProximityTTwo VolumeStorageToNetworkProximity = "T2" +) + +func PossibleValuesForVolumeStorageToNetworkProximity() []string { + return []string{ + string(VolumeStorageToNetworkProximityAcrossTTwo), + string(VolumeStorageToNetworkProximityDefault), + string(VolumeStorageToNetworkProximityTOne), + string(VolumeStorageToNetworkProximityTTwo), + } +} + +func (s *VolumeStorageToNetworkProximity) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseVolumeStorageToNetworkProximity(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseVolumeStorageToNetworkProximity(input string) (*VolumeStorageToNetworkProximity, error) { + vals := map[string]VolumeStorageToNetworkProximity{ + "acrosst2": VolumeStorageToNetworkProximityAcrossTTwo, + "default": VolumeStorageToNetworkProximityDefault, + "t1": VolumeStorageToNetworkProximityTOne, + "t2": VolumeStorageToNetworkProximityTTwo, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := VolumeStorageToNetworkProximity(input) + return &out, nil +} diff --git 
a/resource-manager/netapp/2025-06-01/volumegroups/id_netappaccount.go b/resource-manager/netapp/2025-06-01/volumegroups/id_netappaccount.go new file mode 100644 index 00000000000..b76f8ad025d --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/id_netappaccount.go @@ -0,0 +1,130 @@ +package volumegroups + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&NetAppAccountId{}) +} + +var _ resourceids.ResourceId = &NetAppAccountId{} + +// NetAppAccountId is a struct representing the Resource ID for a Net App Account +type NetAppAccountId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string +} + +// NewNetAppAccountID returns a new NetAppAccountId struct +func NewNetAppAccountID(subscriptionId string, resourceGroupName string, netAppAccountName string) NetAppAccountId { + return NetAppAccountId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + } +} + +// ParseNetAppAccountID parses 'input' into a NetAppAccountId +func ParseNetAppAccountID(input string) (*NetAppAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseNetAppAccountIDInsensitively parses 'input' case-insensitively into a NetAppAccountId +// note: this method should only be used for API response data and not user input +func ParseNetAppAccountIDInsensitively(input string) 
(*NetAppAccountId, error) { + parser := resourceids.NewParserFromResourceIdType(&NetAppAccountId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := NetAppAccountId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *NetAppAccountId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + return nil +} + +// ValidateNetAppAccountID checks that 'input' can be parsed as a Net App Account ID +func ValidateNetAppAccountID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseNetAppAccountID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Net App Account ID +func (id NetAppAccountId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Net App Account ID +func (id NetAppAccountId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", 
"12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + } +} + +// String returns a human-readable description of this Net App Account ID +func (id NetAppAccountId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + } + return fmt.Sprintf("Net App Account (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/id_netappaccount_test.go b/resource-manager/netapp/2025-06-01/volumegroups/id_netappaccount_test.go new file mode 100644 index 00000000000..ea57dfac8fc --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/id_netappaccount_test.go @@ -0,0 +1,282 @@ +package volumegroups + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &NetAppAccountId{} + +func TestNewNetAppAccountID(t *testing.T) { + id := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } +} + +func TestFormatNetAppAccountID(t *testing.T) { + actual := NewNetAppAccountID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseNetAppAccountID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: 
true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestParseNetAppAccountIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *NetAppAccountId + }{ + { + // Incomplete URI + 
Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: 
true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Expected: &NetAppAccountId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseNetAppAccountIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, 
actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + } +} + +func TestSegmentsForNetAppAccountId(t *testing.T) { + segments := NetAppAccountId{}.Segments() + if len(segments) == 0 { + t.Fatalf("NetAppAccountId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/id_volumegroup.go b/resource-manager/netapp/2025-06-01/volumegroups/id_volumegroup.go new file mode 100644 index 00000000000..2fbfb2b2133 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/id_volumegroup.go @@ -0,0 +1,139 @@ +package volumegroups + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&VolumeGroupId{}) +} + +var _ resourceids.ResourceId = &VolumeGroupId{} + +// VolumeGroupId is a struct representing the Resource ID for a Volume Group +type VolumeGroupId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + VolumeGroupName string +} + +// NewVolumeGroupID returns a new VolumeGroupId struct +func NewVolumeGroupID(subscriptionId string, resourceGroupName string, netAppAccountName string, volumeGroupName string) VolumeGroupId { + return VolumeGroupId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + VolumeGroupName: volumeGroupName, + } +} + +// ParseVolumeGroupID parses 'input' into a VolumeGroupId +func ParseVolumeGroupID(input string) (*VolumeGroupId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeGroupId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeGroupId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeGroupIDInsensitively parses 'input' case-insensitively into a VolumeGroupId +// note: this method should only be used for API response data and not user input +func ParseVolumeGroupIDInsensitively(input string) (*VolumeGroupId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeGroupId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeGroupId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeGroupId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, 
ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.VolumeGroupName, ok = input.Parsed["volumeGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeGroupName", input) + } + + return nil +} + +// ValidateVolumeGroupID checks that 'input' can be parsed as a Volume Group ID +func ValidateVolumeGroupID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeGroupID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume Group ID +func (id VolumeGroupId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/volumeGroups/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.VolumeGroupName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume Group ID +func (id VolumeGroupId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + 
resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticVolumeGroups", "volumeGroups", "volumeGroups"), + resourceids.UserSpecifiedSegment("volumeGroupName", "volumeGroupName"), + } +} + +// String returns a human-readable description of this Volume Group ID +func (id VolumeGroupId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Volume Group Name: %q", id.VolumeGroupName), + } + return fmt.Sprintf("Volume Group (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/id_volumegroup_test.go b/resource-manager/netapp/2025-06-01/volumegroups/id_volumegroup_test.go new file mode 100644 index 00000000000..9a1dd20a884 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/id_volumegroup_test.go @@ -0,0 +1,327 @@ +package volumegroups + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &VolumeGroupId{} + +func TestNewVolumeGroupID(t *testing.T) { + id := NewVolumeGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "volumeGroupName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.VolumeGroupName != "volumeGroupName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeGroupName'", id.VolumeGroupName, "volumeGroupName") + } +} + +func TestFormatVolumeGroupID(t *testing.T) { + actual := NewVolumeGroupID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "volumeGroupName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/volumeGroups/volumeGroupName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeGroupID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeGroupId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/volumeGroups", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/volumeGroups/volumeGroupName", + Expected: &VolumeGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + VolumeGroupName: "volumeGroupName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/volumeGroups/volumeGroupName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeGroupID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { 
+ t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.VolumeGroupName != v.Expected.VolumeGroupName { + t.Fatalf("Expected %q but got %q for VolumeGroupName", v.Expected.VolumeGroupName, actual.VolumeGroupName) + } + + } +} + +func TestParseVolumeGroupIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeGroupId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/volumeGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + 
Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/vOlUmEgRoUpS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/volumeGroups/volumeGroupName", + Expected: &VolumeGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + VolumeGroupName: "volumeGroupName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/volumeGroups/volumeGroupName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/vOlUmEgRoUpS/vOlUmEgRoUpNaMe", + Expected: &VolumeGroupId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + VolumeGroupName: "vOlUmEgRoUpNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/vOlUmEgRoUpS/vOlUmEgRoUpNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeGroupIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if 
actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.VolumeGroupName != v.Expected.VolumeGroupName { + t.Fatalf("Expected %q but got %q for VolumeGroupName", v.Expected.VolumeGroupName, actual.VolumeGroupName) + } + + } +} + +func TestSegmentsForVolumeGroupId(t *testing.T) { + segments := VolumeGroupId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeGroupId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/method_create.go b/resource-manager/netapp/2025-06-01/volumegroups/method_create.go new file mode 100644 index 00000000000..20ea352a9b8 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/method_create.go @@ -0,0 +1,74 @@ +package volumegroups + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type CreateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *VolumeGroupDetails +} + +// Create ... +func (c VolumeGroupsClient) Create(ctx context.Context, id VolumeGroupId, input VolumeGroupDetails) (result CreateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateThenPoll performs Create then polls until it's completed +func (c VolumeGroupsClient) CreateThenPoll(ctx context.Context, id VolumeGroupId, input VolumeGroupDetails) error { + result, err := c.Create(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Create: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Create: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/method_delete.go b/resource-manager/netapp/2025-06-01/volumegroups/method_delete.go new file mode 100644 index 00000000000..32e6c75b150 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/method_delete.go @@ -0,0 +1,71 @@ +package volumegroups + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c VolumeGroupsClient) Delete(ctx context.Context, id VolumeGroupId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c VolumeGroupsClient) DeleteThenPoll(ctx context.Context, id VolumeGroupId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/method_get.go b/resource-manager/netapp/2025-06-01/volumegroups/method_get.go new file mode 100644 index 00000000000..59130918900 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/method_get.go @@ -0,0 +1,53 @@ +package volumegroups + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information.
+
+type GetOperationResponse struct {
+ HttpResponse *http.Response
+ OData *odata.OData
+ Model *VolumeGroupDetails
+}
+
+// Get issues a GET for the Volume Group and unmarshals the response body into VolumeGroupDetails on `result.Model`.
+func (c VolumeGroupsClient) Get(ctx context.Context, id VolumeGroupId) (result GetOperationResponse, err error) {
+ opts := client.RequestOptions{
+ ContentType: "application/json; charset=utf-8",
+ ExpectedStatusCodes: []int{
+ http.StatusOK,
+ },
+ HttpMethod: http.MethodGet,
+ Path: id.ID(),
+ }
+
+ req, err := c.Client.NewRequest(ctx, opts)
+ if err != nil {
+ return
+ }
+
+ var resp *client.Response
+ resp, err = req.Execute(ctx)
+ if resp != nil {
+ result.OData = resp.OData
+ result.HttpResponse = resp.Response
+ }
+ if err != nil {
+ return
+ }
+
+ var model VolumeGroupDetails
+ result.Model = &model
+ if err = resp.Unmarshal(result.Model); err != nil {
+ return
+ }
+
+ return
+}
diff --git a/resource-manager/netapp/2025-06-01/volumegroups/method_listbynetappaccount.go b/resource-manager/netapp/2025-06-01/volumegroups/method_listbynetappaccount.go
new file mode 100644
index 00000000000..29dee764979
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumegroups/method_listbynetappaccount.go
@@ -0,0 +1,54 @@
+package volumegroups
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "github.com/hashicorp/go-azure-sdk/sdk/client"
+ "github.com/hashicorp/go-azure-sdk/sdk/odata"
+)
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+
+type ListByNetAppAccountOperationResponse struct {
+ HttpResponse *http.Response
+ OData *odata.OData
+ Model *VolumeGroupList
+}
+
+// ListByNetAppAccount issues a GET against the `volumeGroups` collection of the NetApp Account, unmarshaling the response into a VolumeGroupList on `result.Model`.
+func (c VolumeGroupsClient) ListByNetAppAccount(ctx context.Context, id NetAppAccountId) (result ListByNetAppAccountOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/volumeGroups", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model VolumeGroupList + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_destinationreplication.go b/resource-manager/netapp/2025-06-01/volumegroups/model_destinationreplication.go new file mode 100644 index 00000000000..6f563569845 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_destinationreplication.go @@ -0,0 +1,11 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DestinationReplication struct { + Region *string `json:"region,omitempty"` + ReplicationType *ReplicationType `json:"replicationType,omitempty"` + ResourceId *string `json:"resourceId,omitempty"` + Zone *string `json:"zone,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_exportpolicyrule.go b/resource-manager/netapp/2025-06-01/volumegroups/model_exportpolicyrule.go new file mode 100644 index 00000000000..e17395ba1a7 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_exportpolicyrule.go @@ -0,0 +1,22 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ExportPolicyRule struct { + AllowedClients *string `json:"allowedClients,omitempty"` + ChownMode *ChownMode `json:"chownMode,omitempty"` + Cifs *bool `json:"cifs,omitempty"` + HasRootAccess *bool `json:"hasRootAccess,omitempty"` + Kerberos5ReadOnly *bool `json:"kerberos5ReadOnly,omitempty"` + Kerberos5ReadWrite *bool `json:"kerberos5ReadWrite,omitempty"` + Kerberos5iReadOnly *bool `json:"kerberos5iReadOnly,omitempty"` + Kerberos5iReadWrite *bool `json:"kerberos5iReadWrite,omitempty"` + Kerberos5pReadOnly *bool `json:"kerberos5pReadOnly,omitempty"` + Kerberos5pReadWrite *bool `json:"kerberos5pReadWrite,omitempty"` + Nfsv3 *bool `json:"nfsv3,omitempty"` + Nfsv41 *bool `json:"nfsv41,omitempty"` + RuleIndex *int64 `json:"ruleIndex,omitempty"` + UnixReadOnly *bool `json:"unixReadOnly,omitempty"` + UnixReadWrite *bool `json:"unixReadWrite,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_mounttargetproperties.go b/resource-manager/netapp/2025-06-01/volumegroups/model_mounttargetproperties.go new file mode 100644 index 00000000000..5844b80ce9d --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_mounttargetproperties.go @@ -0,0 +1,11 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type MountTargetProperties struct { + FileSystemId string `json:"fileSystemId"` + IPAddress *string `json:"ipAddress,omitempty"` + MountTargetId *string `json:"mountTargetId,omitempty"` + SmbServerFqdn *string `json:"smbServerFqdn,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_placementkeyvaluepairs.go b/resource-manager/netapp/2025-06-01/volumegroups/model_placementkeyvaluepairs.go new file mode 100644 index 00000000000..1714b1662a1 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_placementkeyvaluepairs.go @@ -0,0 +1,9 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PlacementKeyValuePairs struct { + Key string `json:"key"` + Value string `json:"value"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_remotepath.go b/resource-manager/netapp/2025-06-01/volumegroups/model_remotepath.go new file mode 100644 index 00000000000..e1b941b3ea6 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_remotepath.go @@ -0,0 +1,10 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RemotePath struct { + ExternalHostName string `json:"externalHostName"` + ServerName string `json:"serverName"` + VolumeName string `json:"volumeName"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_replicationobject.go b/resource-manager/netapp/2025-06-01/volumegroups/model_replicationobject.go new file mode 100644 index 00000000000..5715e56e940 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_replicationobject.go @@ -0,0 +1,14 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type ReplicationObject struct { + DestinationReplications *[]DestinationReplication `json:"destinationReplications,omitempty"` + EndpointType *EndpointType `json:"endpointType,omitempty"` + RemotePath *RemotePath `json:"remotePath,omitempty"` + RemoteVolumeRegion *string `json:"remoteVolumeRegion,omitempty"` + RemoteVolumeResourceId *string `json:"remoteVolumeResourceId,omitempty"` + ReplicationId *string `json:"replicationId,omitempty"` + ReplicationSchedule *ReplicationSchedule `json:"replicationSchedule,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_volumebackupproperties.go b/resource-manager/netapp/2025-06-01/volumegroups/model_volumebackupproperties.go new file mode 100644 index 00000000000..39ce5a2b79b --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_volumebackupproperties.go @@ -0,0 +1,10 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeBackupProperties struct { + BackupPolicyId *string `json:"backupPolicyId,omitempty"` + BackupVaultId *string `json:"backupVaultId,omitempty"` + PolicyEnforced *bool `json:"policyEnforced,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroup.go b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroup.go new file mode 100644 index 00000000000..5e4df7bb9c0 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroup.go @@ -0,0 +1,12 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeGroup struct { + Id *string `json:"id,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *VolumeGroupListProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroupdetails.go b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroupdetails.go new file mode 100644 index 00000000000..5f01af82c0f --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroupdetails.go @@ -0,0 +1,12 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeGroupDetails struct { + Id *string `json:"id,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *VolumeGroupProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_volumegrouplist.go b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegrouplist.go new file mode 100644 index 00000000000..447c6e6e858 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegrouplist.go @@ -0,0 +1,8 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeGroupList struct { + Value *[]VolumeGroup `json:"value,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_volumegrouplistproperties.go b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegrouplistproperties.go new file mode 100644 index 00000000000..5788ee516fc --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegrouplistproperties.go @@ -0,0 +1,9 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeGroupListProperties struct { + GroupMetaData *VolumeGroupMetaData `json:"groupMetaData,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroupmetadata.go b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroupmetadata.go new file mode 100644 index 00000000000..b9fa343690c --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroupmetadata.go @@ -0,0 +1,12 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeGroupMetaData struct { + ApplicationIdentifier *string `json:"applicationIdentifier,omitempty"` + ApplicationType *ApplicationType `json:"applicationType,omitempty"` + GlobalPlacementRules *[]PlacementKeyValuePairs `json:"globalPlacementRules,omitempty"` + GroupDescription *string `json:"groupDescription,omitempty"` + VolumesCount *int64 `json:"volumesCount,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroupproperties.go b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroupproperties.go new file mode 100644 index 00000000000..0bab153b397 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroupproperties.go @@ -0,0 +1,10 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeGroupProperties struct { + GroupMetaData *VolumeGroupMetaData `json:"groupMetaData,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + Volumes *[]VolumeGroupVolumeProperties `json:"volumes,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroupvolumeproperties.go b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroupvolumeproperties.go new file mode 100644 index 00000000000..935a8a5f480 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_volumegroupvolumeproperties.go @@ -0,0 +1,17 @@ +package volumegroups + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/zones" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeGroupVolumeProperties struct { + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties VolumeProperties `json:"properties"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` + Zones *zones.Schema `json:"zones,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_volumeproperties.go b/resource-manager/netapp/2025-06-01/volumegroups/model_volumeproperties.go new file mode 100644 index 00000000000..58e5f13cbbf --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_volumeproperties.go @@ -0,0 +1,65 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeProperties struct { + AcceptGrowCapacityPoolForShortTermCloneSplit *AcceptGrowCapacityPoolForShortTermCloneSplit `json:"acceptGrowCapacityPoolForShortTermCloneSplit,omitempty"` + ActualThroughputMibps *float64 `json:"actualThroughputMibps,omitempty"` + AvsDataStore *AvsDataStore `json:"avsDataStore,omitempty"` + BackupId *string `json:"backupId,omitempty"` + BaremetalTenantId *string `json:"baremetalTenantId,omitempty"` + CapacityPoolResourceId *string `json:"capacityPoolResourceId,omitempty"` + CloneProgress *int64 `json:"cloneProgress,omitempty"` + CoolAccess *bool `json:"coolAccess,omitempty"` + CoolAccessRetrievalPolicy *CoolAccessRetrievalPolicy `json:"coolAccessRetrievalPolicy,omitempty"` + CoolAccessTieringPolicy *CoolAccessTieringPolicy `json:"coolAccessTieringPolicy,omitempty"` + CoolnessPeriod *int64 `json:"coolnessPeriod,omitempty"` + CreationToken string `json:"creationToken"` + DataProtection *VolumePropertiesDataProtection `json:"dataProtection,omitempty"` + DataStoreResourceId *[]string `json:"dataStoreResourceId,omitempty"` + DefaultGroupQuotaInKiBs *int64 `json:"defaultGroupQuotaInKiBs,omitempty"` + DefaultUserQuotaInKiBs *int64 
`json:"defaultUserQuotaInKiBs,omitempty"` + DeleteBaseSnapshot *bool `json:"deleteBaseSnapshot,omitempty"` + EffectiveNetworkFeatures *NetworkFeatures `json:"effectiveNetworkFeatures,omitempty"` + EnableSubvolumes *EnableSubvolumes `json:"enableSubvolumes,omitempty"` + Encrypted *bool `json:"encrypted,omitempty"` + EncryptionKeySource *EncryptionKeySource `json:"encryptionKeySource,omitempty"` + ExportPolicy *VolumePropertiesExportPolicy `json:"exportPolicy,omitempty"` + FileAccessLogs *FileAccessLogs `json:"fileAccessLogs,omitempty"` + FileSystemId *string `json:"fileSystemId,omitempty"` + InheritedSizeInBytes *int64 `json:"inheritedSizeInBytes,omitempty"` + IsDefaultQuotaEnabled *bool `json:"isDefaultQuotaEnabled,omitempty"` + IsLargeVolume *bool `json:"isLargeVolume,omitempty"` + IsRestoring *bool `json:"isRestoring,omitempty"` + KerberosEnabled *bool `json:"kerberosEnabled,omitempty"` + KeyVaultPrivateEndpointResourceId *string `json:"keyVaultPrivateEndpointResourceId,omitempty"` + LdapEnabled *bool `json:"ldapEnabled,omitempty"` + MaximumNumberOfFiles *int64 `json:"maximumNumberOfFiles,omitempty"` + MountTargets *[]MountTargetProperties `json:"mountTargets,omitempty"` + NetworkFeatures *NetworkFeatures `json:"networkFeatures,omitempty"` + NetworkSiblingSetId *string `json:"networkSiblingSetId,omitempty"` + OriginatingResourceId *string `json:"originatingResourceId,omitempty"` + PlacementRules *[]PlacementKeyValuePairs `json:"placementRules,omitempty"` + ProtocolTypes *[]string `json:"protocolTypes,omitempty"` + ProvisionedAvailabilityZone *string `json:"provisionedAvailabilityZone,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + ProximityPlacementGroup *string `json:"proximityPlacementGroup,omitempty"` + SecurityStyle *SecurityStyle `json:"securityStyle,omitempty"` + ServiceLevel *ServiceLevel `json:"serviceLevel,omitempty"` + SmbAccessBasedEnumeration *SmbAccessBasedEnumeration `json:"smbAccessBasedEnumeration,omitempty"` + 
SmbContinuouslyAvailable *bool `json:"smbContinuouslyAvailable,omitempty"` + SmbEncryption *bool `json:"smbEncryption,omitempty"` + SmbNonBrowsable *SmbNonBrowsable `json:"smbNonBrowsable,omitempty"` + SnapshotDirectoryVisible *bool `json:"snapshotDirectoryVisible,omitempty"` + SnapshotId *string `json:"snapshotId,omitempty"` + StorageToNetworkProximity *VolumeStorageToNetworkProximity `json:"storageToNetworkProximity,omitempty"` + SubnetId string `json:"subnetId"` + T2Network *string `json:"t2Network,omitempty"` + ThroughputMibps *float64 `json:"throughputMibps,omitempty"` + UnixPermissions *string `json:"unixPermissions,omitempty"` + UsageThreshold int64 `json:"usageThreshold"` + VolumeGroupName *string `json:"volumeGroupName,omitempty"` + VolumeSpecName *string `json:"volumeSpecName,omitempty"` + VolumeType *string `json:"volumeType,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_volumepropertiesdataprotection.go b/resource-manager/netapp/2025-06-01/volumegroups/model_volumepropertiesdataprotection.go new file mode 100644 index 00000000000..632e45e64b0 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_volumepropertiesdataprotection.go @@ -0,0 +1,11 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumePropertiesDataProtection struct { + Backup *VolumeBackupProperties `json:"backup,omitempty"` + Replication *ReplicationObject `json:"replication,omitempty"` + Snapshot *VolumeSnapshotProperties `json:"snapshot,omitempty"` + VolumeRelocation *VolumeRelocationProperties `json:"volumeRelocation,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_volumepropertiesexportpolicy.go b/resource-manager/netapp/2025-06-01/volumegroups/model_volumepropertiesexportpolicy.go new file mode 100644 index 00000000000..227c8586d6f --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_volumepropertiesexportpolicy.go @@ -0,0 +1,8 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumePropertiesExportPolicy struct { + Rules *[]ExportPolicyRule `json:"rules,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_volumerelocationproperties.go b/resource-manager/netapp/2025-06-01/volumegroups/model_volumerelocationproperties.go new file mode 100644 index 00000000000..4fea57c7396 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_volumerelocationproperties.go @@ -0,0 +1,9 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeRelocationProperties struct { + ReadyToBeFinalized *bool `json:"readyToBeFinalized,omitempty"` + RelocationRequested *bool `json:"relocationRequested,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/model_volumesnapshotproperties.go b/resource-manager/netapp/2025-06-01/volumegroups/model_volumesnapshotproperties.go new file mode 100644 index 00000000000..a3f5011da3b --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/model_volumesnapshotproperties.go @@ -0,0 +1,8 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeSnapshotProperties struct { + SnapshotPolicyId *string `json:"snapshotPolicyId,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumegroups/version.go b/resource-manager/netapp/2025-06-01/volumegroups/version.go new file mode 100644 index 00000000000..6445aaeaeaf --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumegroups/version.go @@ -0,0 +1,10 @@ +package volumegroups + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/volumegroups/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/README.md b/resource-manager/netapp/2025-06-01/volumequotarules/README.md new file mode 100644 index 00000000000..3a5e391fc2e --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/README.md @@ -0,0 +1,98 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumequotarules` Documentation + +The `volumequotarules` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). 
+ +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumequotarules" +``` + + +### Client Initialization + +```go +client := volumequotarules.NewVolumeQuotaRulesClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `VolumeQuotaRulesClient.Create` + +```go +ctx := context.TODO() +id := volumequotarules.NewVolumeQuotaRuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "volumeQuotaRuleName") + +payload := volumequotarules.VolumeQuotaRule{ + // ... +} + + +if err := client.CreateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumeQuotaRulesClient.Delete` + +```go +ctx := context.TODO() +id := volumequotarules.NewVolumeQuotaRuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "volumeQuotaRuleName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumeQuotaRulesClient.Get` + +```go +ctx := context.TODO() +id := volumequotarules.NewVolumeQuotaRuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "volumeQuotaRuleName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `VolumeQuotaRulesClient.ListByVolume` + +```go +ctx := context.TODO() +id := volumequotarules.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +read, err 
:= client.ListByVolume(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `VolumeQuotaRulesClient.Update` + +```go +ctx := context.TODO() +id := volumequotarules.NewVolumeQuotaRuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "volumeQuotaRuleName") + +payload := volumequotarules.VolumeQuotaRulePatch{ + // ... +} + + +if err := client.UpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/client.go b/resource-manager/netapp/2025-06-01/volumequotarules/client.go new file mode 100644 index 00000000000..b42efdd36e2 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/client.go @@ -0,0 +1,26 @@ +package volumequotarules + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeQuotaRulesClient struct { + Client *resourcemanager.Client +} + +func NewVolumeQuotaRulesClientWithBaseURI(sdkApi sdkEnv.Api) (*VolumeQuotaRulesClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "volumequotarules", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating VolumeQuotaRulesClient: %+v", err) + } + + return &VolumeQuotaRulesClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/constants.go b/resource-manager/netapp/2025-06-01/volumequotarules/constants.go new file mode 100644 index 00000000000..fb0e41e1afb --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/constants.go @@ -0,0 +1,113 @@ +package volumequotarules + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ProvisioningState string + +const ( + ProvisioningStateAccepted ProvisioningState = "Accepted" + ProvisioningStateCreating ProvisioningState = "Creating" + ProvisioningStateDeleting ProvisioningState = "Deleting" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateMoving ProvisioningState = "Moving" + ProvisioningStatePatching ProvisioningState = "Patching" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateAccepted), + string(ProvisioningStateCreating), + string(ProvisioningStateDeleting), + string(ProvisioningStateFailed), + string(ProvisioningStateMoving), + string(ProvisioningStatePatching), + string(ProvisioningStateSucceeded), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := 
parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "accepted": ProvisioningStateAccepted, + "creating": ProvisioningStateCreating, + "deleting": ProvisioningStateDeleting, + "failed": ProvisioningStateFailed, + "moving": ProvisioningStateMoving, + "patching": ProvisioningStatePatching, + "succeeded": ProvisioningStateSucceeded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningState(input) + return &out, nil +} + +type Type string + +const ( + TypeDefaultGroupQuota Type = "DefaultGroupQuota" + TypeDefaultUserQuota Type = "DefaultUserQuota" + TypeIndividualGroupQuota Type = "IndividualGroupQuota" + TypeIndividualUserQuota Type = "IndividualUserQuota" +) + +func PossibleValuesForType() []string { + return []string{ + string(TypeDefaultGroupQuota), + string(TypeDefaultUserQuota), + string(TypeIndividualGroupQuota), + string(TypeIndividualUserQuota), + } +} + +func (s *Type) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseType(input string) (*Type, error) { + vals := map[string]Type{ + "defaultgroupquota": TypeDefaultGroupQuota, + "defaultuserquota": TypeDefaultUserQuota, + "individualgroupquota": TypeIndividualGroupQuota, + "individualuserquota": TypeIndividualUserQuota, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := Type(input) + return &out, nil +} diff --git 
a/resource-manager/netapp/2025-06-01/volumequotarules/id_volume.go b/resource-manager/netapp/2025-06-01/volumequotarules/id_volume.go new file mode 100644 index 00000000000..f3aab9f00df --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/id_volume.go @@ -0,0 +1,148 @@ +package volumequotarules + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input 
string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise 
this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/id_volume_test.go b/resource-manager/netapp/2025-06-01/volumequotarules/id_volume_test.go new file mode 100644 index 00000000000..c0acc543f9f --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/id_volume_test.go @@ -0,0 +1,372 @@ +package volumequotarules + +import ( + "testing" + + 
"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input 
string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/id_volumequotarule.go 
b/resource-manager/netapp/2025-06-01/volumequotarules/id_volumequotarule.go new file mode 100644 index 00000000000..9007e1468ae --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/id_volumequotarule.go @@ -0,0 +1,157 @@ +package volumequotarules + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&VolumeQuotaRuleId{}) +} + +var _ resourceids.ResourceId = &VolumeQuotaRuleId{} + +// VolumeQuotaRuleId is a struct representing the Resource ID for a Volume Quota Rule +type VolumeQuotaRuleId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string + VolumeQuotaRuleName string +} + +// NewVolumeQuotaRuleID returns a new VolumeQuotaRuleId struct +func NewVolumeQuotaRuleID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string, volumeQuotaRuleName string) VolumeQuotaRuleId { + return VolumeQuotaRuleId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + VolumeQuotaRuleName: volumeQuotaRuleName, + } +} + +// ParseVolumeQuotaRuleID parses 'input' into a VolumeQuotaRuleId +func ParseVolumeQuotaRuleID(input string) (*VolumeQuotaRuleId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeQuotaRuleId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeQuotaRuleId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// 
ParseVolumeQuotaRuleIDInsensitively parses 'input' case-insensitively into a VolumeQuotaRuleId +// note: this method should only be used for API response data and not user input +func ParseVolumeQuotaRuleIDInsensitively(input string) (*VolumeQuotaRuleId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeQuotaRuleId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeQuotaRuleId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeQuotaRuleId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + if id.VolumeQuotaRuleName, ok = input.Parsed["volumeQuotaRuleName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeQuotaRuleName", input) + } + + return nil +} + +// ValidateVolumeQuotaRuleID checks that 'input' can be parsed as a Volume Quota Rule ID +func ValidateVolumeQuotaRuleID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeQuotaRuleID(v); 
err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume Quota Rule ID +func (id VolumeQuotaRuleId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s/volumeQuotaRules/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName, id.VolumeQuotaRuleName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume Quota Rule ID +func (id VolumeQuotaRuleId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + resourceids.StaticSegment("staticVolumeQuotaRules", "volumeQuotaRules", "volumeQuotaRules"), + resourceids.UserSpecifiedSegment("volumeQuotaRuleName", "volumeQuotaRuleName"), + } +} + +// String returns a human-readable description of this Volume Quota Rule ID +func (id VolumeQuotaRuleId) String() string { + components := []string{ + 
fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + fmt.Sprintf("Volume Quota Rule Name: %q", id.VolumeQuotaRuleName), + } + return fmt.Sprintf("Volume Quota Rule (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/id_volumequotarule_test.go b/resource-manager/netapp/2025-06-01/volumequotarules/id_volumequotarule_test.go new file mode 100644 index 00000000000..3b3ee84e25f --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/id_volumequotarule_test.go @@ -0,0 +1,417 @@ +package volumequotarules + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &VolumeQuotaRuleId{} + +func TestNewVolumeQuotaRuleID(t *testing.T) { + id := NewVolumeQuotaRuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "volumeQuotaRuleName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } + + if id.VolumeQuotaRuleName != "volumeQuotaRuleName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeQuotaRuleName'", id.VolumeQuotaRuleName, "volumeQuotaRuleName") + } +} + +func TestFormatVolumeQuotaRuleID(t *testing.T) { + actual := NewVolumeQuotaRuleID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName", "volumeQuotaRuleName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/volumeQuotaRules/volumeQuotaRuleName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeQuotaRuleID(t *testing.T) { + testData := []struct { + Input string + Error 
bool + Expected *VolumeQuotaRuleId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/volumeQuotaRules", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/volumeQuotaRules/volumeQuotaRuleName", + Expected: &VolumeQuotaRuleId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + VolumeQuotaRuleName: "volumeQuotaRuleName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/volumeQuotaRules/volumeQuotaRuleName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeQuotaRuleID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", 
v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + if actual.VolumeQuotaRuleName != v.Expected.VolumeQuotaRuleName { + t.Fatalf("Expected %q but got %q for VolumeQuotaRuleName", v.Expected.VolumeQuotaRuleName, actual.VolumeQuotaRuleName) + } + + } +} + +func TestParseVolumeQuotaRuleIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeQuotaRuleId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + 
Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/volumeQuotaRules", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/vOlUmEqUoTaRuLeS", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/volumeQuotaRules/volumeQuotaRuleName", + Expected: &VolumeQuotaRuleId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + VolumeQuotaRuleName: "volumeQuotaRuleName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/volumeQuotaRules/volumeQuotaRuleName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/vOlUmEqUoTaRuLeS/vOlUmEqUoTaRuLeNaMe", + Expected: &VolumeQuotaRuleId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", 
+ VolumeQuotaRuleName: "vOlUmEqUoTaRuLeNaMe", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/vOlUmEqUoTaRuLeS/vOlUmEqUoTaRuLeNaMe/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeQuotaRuleIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + if actual.VolumeQuotaRuleName != v.Expected.VolumeQuotaRuleName { + t.Fatalf("Expected %q but got %q for VolumeQuotaRuleName", v.Expected.VolumeQuotaRuleName, actual.VolumeQuotaRuleName) + } + + } +} + +func TestSegmentsForVolumeQuotaRuleId(t *testing.T) { + segments := VolumeQuotaRuleId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeQuotaRuleId has no segments") + } + + 
uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/method_create.go b/resource-manager/netapp/2025-06-01/volumequotarules/method_create.go new file mode 100644 index 00000000000..4904525f7a0 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/method_create.go @@ -0,0 +1,75 @@ +package volumequotarules + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type CreateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *VolumeQuotaRule +} + +// Create ... 
+func (c VolumeQuotaRulesClient) Create(ctx context.Context, id VolumeQuotaRuleId, input VolumeQuotaRule) (result CreateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusCreated, + http.StatusOK, + }, + HttpMethod: http.MethodPut, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// CreateThenPoll performs Create then polls until it's completed +func (c VolumeQuotaRulesClient) CreateThenPoll(ctx context.Context, id VolumeQuotaRuleId, input VolumeQuotaRule) error { + result, err := c.Create(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Create: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Create: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/method_delete.go b/resource-manager/netapp/2025-06-01/volumequotarules/method_delete.go new file mode 100644 index 00000000000..59d6fbecf0a --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/method_delete.go @@ -0,0 +1,71 @@ +package volumequotarules + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c VolumeQuotaRulesClient) Delete(ctx context.Context, id VolumeQuotaRuleId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + http.StatusOK, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c VolumeQuotaRulesClient) DeleteThenPoll(ctx context.Context, id VolumeQuotaRuleId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/method_get.go b/resource-manager/netapp/2025-06-01/volumequotarules/method_get.go new file mode 100644 index 00000000000..0c82ebc839b --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/method_get.go @@ -0,0 +1,53 @@ +package volumequotarules + +import ( + "context" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type GetOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *VolumeQuotaRule +} + +// Get ... +func (c VolumeQuotaRulesClient) Get(ctx context.Context, id VolumeQuotaRuleId) (result GetOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model VolumeQuotaRule + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/method_listbyvolume.go b/resource-manager/netapp/2025-06-01/volumequotarules/method_listbyvolume.go new file mode 100644 index 00000000000..87cd029de34 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/method_listbyvolume.go @@ -0,0 +1,54 @@ +package volumequotarules + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListByVolumeOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *VolumeQuotaRulesList +} + +// ListByVolume ... 
+func (c VolumeQuotaRulesClient) ListByVolume(ctx context.Context, id VolumeId) (result ListByVolumeOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/volumeQuotaRules", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model VolumeQuotaRulesList + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/method_update.go b/resource-manager/netapp/2025-06-01/volumequotarules/method_update.go new file mode 100644 index 00000000000..39d68a726c3 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/method_update.go @@ -0,0 +1,75 @@ +package volumequotarules + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type UpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *VolumeQuotaRule +} + +// Update ... 
+func (c VolumeQuotaRulesClient) Update(ctx context.Context, id VolumeQuotaRuleId, input VolumeQuotaRulePatch) (result UpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UpdateThenPoll performs Update then polls until it's completed +func (c VolumeQuotaRulesClient) UpdateThenPoll(ctx context.Context, id VolumeQuotaRuleId, input VolumeQuotaRulePatch) error { + result, err := c.Update(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Update: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Update: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/model_volumequotarule.go b/resource-manager/netapp/2025-06-01/volumequotarules/model_volumequotarule.go new file mode 100644 index 00000000000..9436f5c0008 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/model_volumequotarule.go @@ -0,0 +1,18 @@ +package volumequotarules + +import ( + "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeQuotaRule struct { + Id *string `json:"id,omitempty"` + Location string `json:"location"` + Name *string `json:"name,omitempty"` + Properties *VolumeQuotaRulesProperties `json:"properties,omitempty"` + SystemData *systemdata.SystemData `json:"systemData,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/model_volumequotarulepatch.go b/resource-manager/netapp/2025-06-01/volumequotarules/model_volumequotarulepatch.go new file mode 100644 index 00000000000..fbf72546c43 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/model_volumequotarulepatch.go @@ -0,0 +1,9 @@ +package volumequotarules + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeQuotaRulePatch struct { + Properties *VolumeQuotaRulesProperties `json:"properties,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/model_volumequotaruleslist.go b/resource-manager/netapp/2025-06-01/volumequotarules/model_volumequotaruleslist.go new file mode 100644 index 00000000000..d9e15cf8035 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/model_volumequotaruleslist.go @@ -0,0 +1,8 @@ +package volumequotarules + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeQuotaRulesList struct { + Value *[]VolumeQuotaRule `json:"value,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/model_volumequotarulesproperties.go b/resource-manager/netapp/2025-06-01/volumequotarules/model_volumequotarulesproperties.go new file mode 100644 index 00000000000..5ae037e0a23 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/model_volumequotarulesproperties.go @@ -0,0 +1,11 @@ +package volumequotarules + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeQuotaRulesProperties struct { + ProvisioningState *ProvisioningState `json:"provisioningState,omitempty"` + QuotaSizeInKiBs *int64 `json:"quotaSizeInKiBs,omitempty"` + QuotaTarget *string `json:"quotaTarget,omitempty"` + QuotaType *Type `json:"quotaType,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumequotarules/version.go b/resource-manager/netapp/2025-06-01/volumequotarules/version.go new file mode 100644 index 00000000000..7b43120f252 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumequotarules/version.go @@ -0,0 +1,10 @@ +package volumequotarules + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/volumequotarules/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/volumes/README.md b/resource-manager/netapp/2025-06-01/volumes/README.md new file mode 100644 index 00000000000..dd112fefb3f --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/README.md @@ -0,0 +1,111 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumes` Documentation + +The `volumes` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumes" +``` + + +### Client Initialization + +```go +client := volumes.NewVolumesClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `VolumesClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := volumes.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +payload := volumes.Volume{ + // ... 
+} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumesClient.Delete` + +```go +ctx := context.TODO() +id := volumes.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +if err := client.DeleteThenPoll(ctx, id, volumes.DefaultDeleteOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumesClient.Get` + +```go +ctx := context.TODO() +id := volumes.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `VolumesClient.List` + +```go +ctx := context.TODO() +id := volumes.NewCapacityPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName") + +// alternatively `client.List(ctx, id)` can be used to do batched pagination +items, err := client.ListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `VolumesClient.PopulateAvailabilityZone` + +```go +ctx := context.TODO() +id := volumes.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +if err := client.PopulateAvailabilityZoneThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumesClient.Update` + +```go +ctx := context.TODO() +id := volumes.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +payload := volumes.VolumePatch{ + // ... 
+} + + +if err := client.UpdateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/volumes/client.go b/resource-manager/netapp/2025-06-01/volumes/client.go new file mode 100644 index 00000000000..6ead8cc3e7d --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/client.go @@ -0,0 +1,26 @@ +package volumes + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesClient struct { + Client *resourcemanager.Client +} + +func NewVolumesClientWithBaseURI(sdkApi sdkEnv.Api) (*VolumesClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "volumes", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating VolumesClient: %+v", err) + } + + return &VolumesClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/volumes/constants.go b/resource-manager/netapp/2025-06-01/volumes/constants.go new file mode 100644 index 00000000000..64c143424a2 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/constants.go @@ -0,0 +1,734 @@ +package volumes + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AcceptGrowCapacityPoolForShortTermCloneSplit string + +const ( + AcceptGrowCapacityPoolForShortTermCloneSplitAccepted AcceptGrowCapacityPoolForShortTermCloneSplit = "Accepted" + AcceptGrowCapacityPoolForShortTermCloneSplitDeclined AcceptGrowCapacityPoolForShortTermCloneSplit = "Declined" +) + +func PossibleValuesForAcceptGrowCapacityPoolForShortTermCloneSplit() []string { + return []string{ + string(AcceptGrowCapacityPoolForShortTermCloneSplitAccepted), + string(AcceptGrowCapacityPoolForShortTermCloneSplitDeclined), + } +} + +func (s *AcceptGrowCapacityPoolForShortTermCloneSplit) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAcceptGrowCapacityPoolForShortTermCloneSplit(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAcceptGrowCapacityPoolForShortTermCloneSplit(input string) (*AcceptGrowCapacityPoolForShortTermCloneSplit, error) { + vals := map[string]AcceptGrowCapacityPoolForShortTermCloneSplit{ + "accepted": AcceptGrowCapacityPoolForShortTermCloneSplitAccepted, + "declined": AcceptGrowCapacityPoolForShortTermCloneSplitDeclined, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AcceptGrowCapacityPoolForShortTermCloneSplit(input) + return &out, nil +} + +type AvsDataStore string + +const ( + AvsDataStoreDisabled AvsDataStore = "Disabled" + AvsDataStoreEnabled AvsDataStore = "Enabled" +) + +func PossibleValuesForAvsDataStore() []string { + return []string{ + string(AvsDataStoreDisabled), + string(AvsDataStoreEnabled), + } +} + +func (s *AvsDataStore) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := 
parseAvsDataStore(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAvsDataStore(input string) (*AvsDataStore, error) { + vals := map[string]AvsDataStore{ + "disabled": AvsDataStoreDisabled, + "enabled": AvsDataStoreEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AvsDataStore(input) + return &out, nil +} + +type ChownMode string + +const ( + ChownModeRestricted ChownMode = "Restricted" + ChownModeUnrestricted ChownMode = "Unrestricted" +) + +func PossibleValuesForChownMode() []string { + return []string{ + string(ChownModeRestricted), + string(ChownModeUnrestricted), + } +} + +func (s *ChownMode) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseChownMode(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseChownMode(input string) (*ChownMode, error) { + vals := map[string]ChownMode{ + "restricted": ChownModeRestricted, + "unrestricted": ChownModeUnrestricted, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ChownMode(input) + return &out, nil +} + +type CoolAccessRetrievalPolicy string + +const ( + CoolAccessRetrievalPolicyDefault CoolAccessRetrievalPolicy = "Default" + CoolAccessRetrievalPolicyNever CoolAccessRetrievalPolicy = "Never" + CoolAccessRetrievalPolicyOnRead CoolAccessRetrievalPolicy = "OnRead" +) + +func PossibleValuesForCoolAccessRetrievalPolicy() []string { + return []string{ + string(CoolAccessRetrievalPolicyDefault), + string(CoolAccessRetrievalPolicyNever), + string(CoolAccessRetrievalPolicyOnRead), + } +} + +func (s *CoolAccessRetrievalPolicy) 
UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCoolAccessRetrievalPolicy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCoolAccessRetrievalPolicy(input string) (*CoolAccessRetrievalPolicy, error) { + vals := map[string]CoolAccessRetrievalPolicy{ + "default": CoolAccessRetrievalPolicyDefault, + "never": CoolAccessRetrievalPolicyNever, + "onread": CoolAccessRetrievalPolicyOnRead, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CoolAccessRetrievalPolicy(input) + return &out, nil +} + +type CoolAccessTieringPolicy string + +const ( + CoolAccessTieringPolicyAuto CoolAccessTieringPolicy = "Auto" + CoolAccessTieringPolicySnapshotOnly CoolAccessTieringPolicy = "SnapshotOnly" +) + +func PossibleValuesForCoolAccessTieringPolicy() []string { + return []string{ + string(CoolAccessTieringPolicyAuto), + string(CoolAccessTieringPolicySnapshotOnly), + } +} + +func (s *CoolAccessTieringPolicy) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCoolAccessTieringPolicy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCoolAccessTieringPolicy(input string) (*CoolAccessTieringPolicy, error) { + vals := map[string]CoolAccessTieringPolicy{ + "auto": CoolAccessTieringPolicyAuto, + "snapshotonly": CoolAccessTieringPolicySnapshotOnly, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CoolAccessTieringPolicy(input) + return &out, nil +} + +type EnableSubvolumes string + +const 
( + EnableSubvolumesDisabled EnableSubvolumes = "Disabled" + EnableSubvolumesEnabled EnableSubvolumes = "Enabled" +) + +func PossibleValuesForEnableSubvolumes() []string { + return []string{ + string(EnableSubvolumesDisabled), + string(EnableSubvolumesEnabled), + } +} + +func (s *EnableSubvolumes) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseEnableSubvolumes(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseEnableSubvolumes(input string) (*EnableSubvolumes, error) { + vals := map[string]EnableSubvolumes{ + "disabled": EnableSubvolumesDisabled, + "enabled": EnableSubvolumesEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := EnableSubvolumes(input) + return &out, nil +} + +type EncryptionKeySource string + +const ( + EncryptionKeySourceMicrosoftPointKeyVault EncryptionKeySource = "Microsoft.KeyVault" + EncryptionKeySourceMicrosoftPointNetApp EncryptionKeySource = "Microsoft.NetApp" +) + +func PossibleValuesForEncryptionKeySource() []string { + return []string{ + string(EncryptionKeySourceMicrosoftPointKeyVault), + string(EncryptionKeySourceMicrosoftPointNetApp), + } +} + +func (s *EncryptionKeySource) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseEncryptionKeySource(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseEncryptionKeySource(input string) (*EncryptionKeySource, error) { + vals := map[string]EncryptionKeySource{ + "microsoft.keyvault": EncryptionKeySourceMicrosoftPointKeyVault, + "microsoft.netapp": 
// enumValue performs a case-insensitive lookup of input against vals. A value
// that does not match a known constant is passed through unchanged
// (best-effort), since these are open enums on the service side.
func enumValue[T ~string](input string, vals map[string]T) *T {
	if v, ok := vals[strings.ToLower(input)]; ok {
		return &v
	}

	// otherwise presume it's an undefined value and best-effort it
	out := T(input)
	return &out
}

type EndpointType string

const (
	EndpointTypeDst EndpointType = "dst"
	EndpointTypeSrc EndpointType = "src"
)

// PossibleValuesForEndpointType returns the known values for EndpointType.
func PossibleValuesForEndpointType() []string {
	return []string{
		string(EndpointTypeDst),
		string(EndpointTypeSrc),
	}
}

// UnmarshalJSON decodes a JSON string into an EndpointType, normalizing casing.
func (s *EndpointType) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseEndpointType(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

func parseEndpointType(input string) (*EndpointType, error) {
	return enumValue(input, map[string]EndpointType{
		"dst": EndpointTypeDst,
		"src": EndpointTypeSrc,
	}), nil
}

type FileAccessLogs string

const (
	FileAccessLogsDisabled FileAccessLogs = "Disabled"
	FileAccessLogsEnabled  FileAccessLogs = "Enabled"
)

// PossibleValuesForFileAccessLogs returns the known values for FileAccessLogs.
func PossibleValuesForFileAccessLogs() []string {
	return []string{
		string(FileAccessLogsDisabled),
		string(FileAccessLogsEnabled),
	}
}

// UnmarshalJSON decodes a JSON string into a FileAccessLogs, normalizing casing.
func (s *FileAccessLogs) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseFileAccessLogs(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

func parseFileAccessLogs(input string) (*FileAccessLogs, error) {
	return enumValue(input, map[string]FileAccessLogs{
		"disabled": FileAccessLogsDisabled,
		"enabled":  FileAccessLogsEnabled,
	}), nil
}

type NetworkFeatures string

const (
	NetworkFeaturesBasic         NetworkFeatures = "Basic"
	NetworkFeaturesBasicStandard NetworkFeatures = "Basic_Standard"
	NetworkFeaturesStandard      NetworkFeatures = "Standard"
	NetworkFeaturesStandardBasic NetworkFeatures = "Standard_Basic"
)

// PossibleValuesForNetworkFeatures returns the known values for NetworkFeatures.
func PossibleValuesForNetworkFeatures() []string {
	return []string{
		string(NetworkFeaturesBasic),
		string(NetworkFeaturesBasicStandard),
		string(NetworkFeaturesStandard),
		string(NetworkFeaturesStandardBasic),
	}
}

// UnmarshalJSON decodes a JSON string into a NetworkFeatures, normalizing casing.
func (s *NetworkFeatures) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseNetworkFeatures(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

func parseNetworkFeatures(input string) (*NetworkFeatures, error) {
	return enumValue(input, map[string]NetworkFeatures{
		"basic":          NetworkFeaturesBasic,
		"basic_standard": NetworkFeaturesBasicStandard,
		"standard":       NetworkFeaturesStandard,
		"standard_basic": NetworkFeaturesStandardBasic,
	}), nil
}

type ReplicationSchedule string

const (
	ReplicationScheduleDaily           ReplicationSchedule = "daily"
	ReplicationScheduleHourly          ReplicationSchedule = "hourly"
	ReplicationScheduleOneZerominutely ReplicationSchedule = "_10minutely"
)

// PossibleValuesForReplicationSchedule returns the known values for ReplicationSchedule.
func PossibleValuesForReplicationSchedule() []string {
	return []string{
		string(ReplicationScheduleDaily),
		string(ReplicationScheduleHourly),
		string(ReplicationScheduleOneZerominutely),
	}
}

// UnmarshalJSON decodes a JSON string into a ReplicationSchedule, normalizing casing.
func (s *ReplicationSchedule) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseReplicationSchedule(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

func parseReplicationSchedule(input string) (*ReplicationSchedule, error) {
	return enumValue(input, map[string]ReplicationSchedule{
		"daily":       ReplicationScheduleDaily,
		"hourly":      ReplicationScheduleHourly,
		"_10minutely": ReplicationScheduleOneZerominutely,
	}), nil
}

type ReplicationType string

const (
	ReplicationTypeCrossRegionReplication ReplicationType = "CrossRegionReplication"
	ReplicationTypeCrossZoneReplication   ReplicationType = "CrossZoneReplication"
)

// PossibleValuesForReplicationType returns the known values for ReplicationType.
func PossibleValuesForReplicationType() []string {
	return []string{
		string(ReplicationTypeCrossRegionReplication),
		string(ReplicationTypeCrossZoneReplication),
	}
}

// UnmarshalJSON decodes a JSON string into a ReplicationType, normalizing casing.
func (s *ReplicationType) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseReplicationType(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

func parseReplicationType(input string) (*ReplicationType, error) {
	return enumValue(input, map[string]ReplicationType{
		"crossregionreplication": ReplicationTypeCrossRegionReplication,
		"crosszonereplication":   ReplicationTypeCrossZoneReplication,
	}), nil
}

type SecurityStyle string

const (
	SecurityStyleNtfs SecurityStyle = "ntfs"
	SecurityStyleUnix SecurityStyle = "unix"
)

// PossibleValuesForSecurityStyle returns the known values for SecurityStyle.
func PossibleValuesForSecurityStyle() []string {
	return []string{
		string(SecurityStyleNtfs),
		string(SecurityStyleUnix),
	}
}

// UnmarshalJSON decodes a JSON string into a SecurityStyle, normalizing casing.
func (s *SecurityStyle) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseSecurityStyle(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

func parseSecurityStyle(input string) (*SecurityStyle, error) {
	return enumValue(input, map[string]SecurityStyle{
		"ntfs": SecurityStyleNtfs,
		"unix": SecurityStyleUnix,
	}), nil
}

type ServiceLevel string

const (
	ServiceLevelFlexible    ServiceLevel = "Flexible"
	ServiceLevelPremium     ServiceLevel = "Premium"
	ServiceLevelStandard    ServiceLevel = "Standard"
	ServiceLevelStandardZRS ServiceLevel = "StandardZRS"
	ServiceLevelUltra       ServiceLevel = "Ultra"
)

// PossibleValuesForServiceLevel returns the known values for ServiceLevel.
func PossibleValuesForServiceLevel() []string {
	return []string{
		string(ServiceLevelFlexible),
		string(ServiceLevelPremium),
		string(ServiceLevelStandard),
		string(ServiceLevelStandardZRS),
		string(ServiceLevelUltra),
	}
}

// UnmarshalJSON decodes a JSON string into a ServiceLevel, normalizing casing.
func (s *ServiceLevel) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseServiceLevel(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

func parseServiceLevel(input string) (*ServiceLevel, error) {
	return enumValue(input, map[string]ServiceLevel{
		"flexible":    ServiceLevelFlexible,
		"premium":     ServiceLevelPremium,
		"standard":    ServiceLevelStandard,
		"standardzrs": ServiceLevelStandardZRS,
		"ultra":       ServiceLevelUltra,
	}), nil
}

type SmbAccessBasedEnumeration string

const (
	SmbAccessBasedEnumerationDisabled SmbAccessBasedEnumeration = "Disabled"
	SmbAccessBasedEnumerationEnabled  SmbAccessBasedEnumeration = "Enabled"
)

// PossibleValuesForSmbAccessBasedEnumeration returns the known values for SmbAccessBasedEnumeration.
func PossibleValuesForSmbAccessBasedEnumeration() []string {
	return []string{
		string(SmbAccessBasedEnumerationDisabled),
		string(SmbAccessBasedEnumerationEnabled),
	}
}

// UnmarshalJSON decodes a JSON string into a SmbAccessBasedEnumeration, normalizing casing.
func (s *SmbAccessBasedEnumeration) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseSmbAccessBasedEnumeration(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

func parseSmbAccessBasedEnumeration(input string) (*SmbAccessBasedEnumeration, error) {
	return enumValue(input, map[string]SmbAccessBasedEnumeration{
		"disabled": SmbAccessBasedEnumerationDisabled,
		"enabled":  SmbAccessBasedEnumerationEnabled,
	}), nil
}

type SmbNonBrowsable string

const (
	SmbNonBrowsableDisabled SmbNonBrowsable = "Disabled"
	SmbNonBrowsableEnabled  SmbNonBrowsable = "Enabled"
)

// PossibleValuesForSmbNonBrowsable returns the known values for SmbNonBrowsable.
func PossibleValuesForSmbNonBrowsable() []string {
	return []string{
		string(SmbNonBrowsableDisabled),
		string(SmbNonBrowsableEnabled),
	}
}

// UnmarshalJSON decodes a JSON string into a SmbNonBrowsable, normalizing casing.
func (s *SmbNonBrowsable) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseSmbNonBrowsable(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

func parseSmbNonBrowsable(input string) (*SmbNonBrowsable, error) {
	return enumValue(input, map[string]SmbNonBrowsable{
		"disabled": SmbNonBrowsableDisabled,
		"enabled":  SmbNonBrowsableEnabled,
	}), nil
}

type VolumeStorageToNetworkProximity string

const (
	VolumeStorageToNetworkProximityAcrossTTwo VolumeStorageToNetworkProximity = "AcrossT2"
	VolumeStorageToNetworkProximityDefault    VolumeStorageToNetworkProximity = "Default"
	VolumeStorageToNetworkProximityTOne       VolumeStorageToNetworkProximity = "T1"
	VolumeStorageToNetworkProximityTTwo       VolumeStorageToNetworkProximity = "T2"
)

// PossibleValuesForVolumeStorageToNetworkProximity returns the known values for VolumeStorageToNetworkProximity.
func PossibleValuesForVolumeStorageToNetworkProximity() []string {
	return []string{
		string(VolumeStorageToNetworkProximityAcrossTTwo),
		string(VolumeStorageToNetworkProximityDefault),
		string(VolumeStorageToNetworkProximityTOne),
		string(VolumeStorageToNetworkProximityTTwo),
	}
}

// UnmarshalJSON decodes a JSON string into a VolumeStorageToNetworkProximity, normalizing casing.
func (s *VolumeStorageToNetworkProximity) UnmarshalJSON(bytes []byte) error {
	var raw string
	if err := json.Unmarshal(bytes, &raw); err != nil {
		return fmt.Errorf("unmarshaling: %+v", err)
	}
	parsed, err := parseVolumeStorageToNetworkProximity(raw)
	if err != nil {
		return fmt.Errorf("parsing %q: %+v", raw, err)
	}
	*s = *parsed
	return nil
}

func parseVolumeStorageToNetworkProximity(input string) (*VolumeStorageToNetworkProximity, error) {
	return enumValue(input, map[string]VolumeStorageToNetworkProximity{
		"acrosst2": VolumeStorageToNetworkProximityAcrossTTwo,
		"default":  VolumeStorageToNetworkProximityDefault,
		"t1":       VolumeStorageToNetworkProximityTOne,
		"t2":       VolumeStorageToNetworkProximityTTwo,
	}), nil
}
VolumeStorageToNetworkProximity(input) + return &out, nil +} diff --git a/resource-manager/netapp/2025-06-01/volumes/id_capacitypool.go b/resource-manager/netapp/2025-06-01/volumes/id_capacitypool.go new file mode 100644 index 00000000000..4a5cb95b3e2 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/id_capacitypool.go @@ -0,0 +1,139 @@ +package volumes + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&CapacityPoolId{}) +} + +var _ resourceids.ResourceId = &CapacityPoolId{} + +// CapacityPoolId is a struct representing the Resource ID for a Capacity Pool +type CapacityPoolId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string +} + +// NewCapacityPoolID returns a new CapacityPoolId struct +func NewCapacityPoolID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string) CapacityPoolId { + return CapacityPoolId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + } +} + +// ParseCapacityPoolID parses 'input' into a CapacityPoolId +func ParseCapacityPoolID(input string) (*CapacityPoolId, error) { + parser := resourceids.NewParserFromResourceIdType(&CapacityPoolId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := CapacityPoolId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseCapacityPoolIDInsensitively parses 'input' case-insensitively into a CapacityPoolId +// note: this method should 
only be used for API response data and not user input +func ParseCapacityPoolIDInsensitively(input string) (*CapacityPoolId, error) { + parser := resourceids.NewParserFromResourceIdType(&CapacityPoolId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := CapacityPoolId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *CapacityPoolId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + return nil +} + +// ValidateCapacityPoolID checks that 'input' can be parsed as a Capacity Pool ID +func ValidateCapacityPoolID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseCapacityPoolID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Capacity Pool ID +func (id CapacityPoolId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName) +} + +// Segments returns a slice of Resource ID Segments which comprise 
this Capacity Pool ID +func (id CapacityPoolId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + } +} + +// String returns a human-readable description of this Capacity Pool ID +func (id CapacityPoolId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + } + return fmt.Sprintf("Capacity Pool (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/volumes/id_capacitypool_test.go b/resource-manager/netapp/2025-06-01/volumes/id_capacitypool_test.go new file mode 100644 index 00000000000..c84c57d8196 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/id_capacitypool_test.go @@ -0,0 +1,327 @@ +package volumes + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &CapacityPoolId{} + +func TestNewCapacityPoolID(t *testing.T) { + id := NewCapacityPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } +} + +func TestFormatCapacityPoolID(t *testing.T) { + actual := NewCapacityPoolID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseCapacityPoolID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *CapacityPoolId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { 
+ // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Expected: &CapacityPoolId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseCapacityPoolID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got 
an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + } +} + +func TestParseCapacityPoolIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *CapacityPoolId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: 
"/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + 
Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Expected: &CapacityPoolId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Expected: &CapacityPoolId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseCapacityPoolIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + 
if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + } +} + +func TestSegmentsForCapacityPoolId(t *testing.T) { + segments := CapacityPoolId{}.Segments() + if len(segments) == 0 { + t.Fatalf("CapacityPoolId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/volumes/id_volume.go b/resource-manager/netapp/2025-06-01/volumes/id_volume.go new file mode 100644 index 00000000000..9335df28621 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/id_volume.go @@ -0,0 +1,148 @@ +package volumes + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + 
return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + 
resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/volumes/id_volume_test.go b/resource-manager/netapp/2025-06-01/volumes/id_volume_test.go new file mode 100644 index 00000000000..a08cc9318a1 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/id_volume_test.go @@ -0,0 +1,372 @@ +package volumes + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/volumes/method_createorupdate.go b/resource-manager/netapp/2025-06-01/volumes/method_createorupdate.go new file mode 100644 index 00000000000..fe291f378c8 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/method_createorupdate.go @@ -0,0 +1,76 @@ +package volumes + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// CreateOrUpdateOperationResponse is the response envelope for CreateOrUpdate.
// Poller tracks the long-running operation; Model, where populated, holds the
// resulting Volume.
type CreateOrUpdateOperationResponse struct {
	Poller       pollers.Poller
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *Volume
}

// CreateOrUpdate ...
func (c VolumesClient) CreateOrUpdate(ctx context.Context, id VolumeId, input Volume) (result CreateOrUpdateOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusAccepted,
			http.StatusCreated,
			http.StatusOK,
		},
		HttpMethod: http.MethodPut,
		Path:       id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	if err = req.Marshal(input); err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	// the response is captured before the error check so callers can inspect
	// the HTTP response / OData payload even when the request failed
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client)
	if err != nil {
		return
	}

	return
}

// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed
func (c VolumesClient) CreateOrUpdateThenPoll(ctx context.Context, id VolumeId, input Volume) error {
	result, err := c.CreateOrUpdate(ctx, id, input)
	if err != nil {
		return fmt.Errorf("performing CreateOrUpdate: %+v", err)
	}

	if err := result.Poller.PollUntilDone(ctx); err != nil {
		return fmt.Errorf("polling after CreateOrUpdate: %+v", err)
	}

	return nil
}
diff --git a/resource-manager/netapp/2025-06-01/volumes/method_delete.go b/resource-manager/netapp/2025-06-01/volumes/method_delete.go
new file mode 100644
index 00000000000..c449fa7d2b8
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumes/method_delete.go
@@ -0,0 +1,99 @@
package volumes

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
	"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// DeleteOperationResponse is the response envelope for Delete; Poller tracks
// the long-running deletion.
type DeleteOperationResponse struct {
	Poller       pollers.Poller
	HttpResponse *http.Response
	OData        *odata.OData
}

// DeleteOperationOptions carries the optional parameters for Delete.
type DeleteOperationOptions struct {
	// ForceDelete, when set, is sent as the `forceDelete` query parameter.
	ForceDelete *bool
}

// DefaultDeleteOperationOptions returns an empty set of options (no query
// parameters are sent).
func DefaultDeleteOperationOptions() DeleteOperationOptions {
	return DeleteOperationOptions{}
}

// ToHeaders maps the options onto request headers; Delete has none.
func (o DeleteOperationOptions) ToHeaders() *client.Headers {
	out := client.Headers{}

	return &out
}

// ToOData maps the options onto an OData query; Delete has none.
func (o DeleteOperationOptions) ToOData() *odata.Query {
	out := odata.Query{}

	return &out
}

// ToQuery maps the options onto query-string parameters; only `forceDelete`
// is supported.
func (o DeleteOperationOptions) ToQuery() *client.QueryParams {
	out := client.QueryParams{}
	if o.ForceDelete != nil {
		out.Append("forceDelete", fmt.Sprintf("%v", *o.ForceDelete))
	}
	return &out
}

// Delete ...
func (c VolumesClient) Delete(ctx context.Context, id VolumeId, options DeleteOperationOptions) (result DeleteOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusAccepted,
			http.StatusNoContent,
		},
		HttpMethod:    http.MethodDelete,
		OptionsObject: options,
		Path:          id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	// capture the response before the error check for caller inspection
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client)
	if err != nil {
		return
	}

	return
}

// DeleteThenPoll performs Delete then polls until it's completed
func (c VolumesClient) DeleteThenPoll(ctx context.Context, id VolumeId, options DeleteOperationOptions) error {
	result, err := c.Delete(ctx, id, options)
	if err != nil {
		return fmt.Errorf("performing Delete: %+v", err)
	}

	if err := result.Poller.PollUntilDone(ctx); err != nil {
		return fmt.Errorf("polling after Delete: %+v", err)
	}

	return nil
}
diff --git a/resource-manager/netapp/2025-06-01/volumes/method_get.go b/resource-manager/netapp/2025-06-01/volumes/method_get.go
new file mode 100644
index 00000000000..1f99d951818
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumes/method_get.go
@@ -0,0 +1,53 @@
package volumes

import (
	"context"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// GetOperationResponse is the response envelope for Get; Model holds the
// retrieved Volume on success.
type GetOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *Volume
}

// Get ...
func (c VolumesClient) Get(ctx context.Context, id VolumeId) (result GetOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusOK,
		},
		HttpMethod: http.MethodGet,
		Path:       id.ID(),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	// capture the response before the error check for caller inspection
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	var model Volume
	result.Model = &model
	if err = resp.Unmarshal(result.Model); err != nil {
		return
	}

	return
}
diff --git a/resource-manager/netapp/2025-06-01/volumes/method_list.go b/resource-manager/netapp/2025-06-01/volumes/method_list.go
new file mode 100644
index 00000000000..443ce94a02d
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumes/method_list.go
@@ -0,0 +1,105 @@
package volumes

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ListOperationResponse is the response envelope for a single page of List
// results.
type ListOperationResponse struct {
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *[]Volume
}

// ListCompleteResult aggregates every page of List results.
type ListCompleteResult struct {
	LatestHttpResponse *http.Response
	Items              []Volume
}

// ListCustomPager extracts the nextLink from each page of results.
type ListCustomPager struct {
	NextLink *odata.Link `json:"nextLink"`
}

// NextPageLink returns the link to the next page, clearing it so the same
// link is never followed twice.
func (p *ListCustomPager) NextPageLink() *odata.Link {
	defer func() {
		p.NextLink = nil
	}()

	return p.NextLink
}

// List ...
func (c VolumesClient) List(ctx context.Context, id CapacityPoolId) (result ListOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusOK,
		},
		HttpMethod: http.MethodGet,
		Pager:      &ListCustomPager{},
		Path:       fmt.Sprintf("%s/volumes", id.ID()),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.ExecutePaged(ctx)
	// capture the response before the error check for caller inspection
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	// the items are wrapped in a `value` envelope in the response body
	var values struct {
		Values *[]Volume `json:"value"`
	}
	if err = resp.Unmarshal(&values); err != nil {
		return
	}

	result.Model = values.Values

	return
}

// ListComplete retrieves all the results into a single object
func (c VolumesClient) ListComplete(ctx context.Context, id CapacityPoolId) (ListCompleteResult, error) {
	return c.ListCompleteMatchingPredicate(ctx, id, VolumeOperationPredicate{})
}

// ListCompleteMatchingPredicate retrieves all the results and then applies the predicate
func (c VolumesClient) ListCompleteMatchingPredicate(ctx context.Context, id CapacityPoolId, predicate VolumeOperationPredicate) (result ListCompleteResult, err error) {
	items := make([]Volume, 0)

	resp, err := c.List(ctx, id)
	if err != nil {
		result.LatestHttpResponse = resp.HttpResponse
		err = fmt.Errorf("loading results: %+v", err)
		return
	}
	if resp.Model != nil {
		for _, v := range *resp.Model {
			if predicate.Matches(v) {
				items = append(items, v)
			}
		}
	}

	result = ListCompleteResult{
		LatestHttpResponse: resp.HttpResponse,
		Items:              items,
	}
	return
}
diff --git a/resource-manager/netapp/2025-06-01/volumes/method_populateavailabilityzone.go b/resource-manager/netapp/2025-06-01/volumes/method_populateavailabilityzone.go
new file mode 100644
index 00000000000..052b13f3fd6
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumes/method_populateavailabilityzone.go
@@ -0,0 +1,71 @@
package volumes

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
	"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// PopulateAvailabilityZoneOperationResponse is the response envelope for
// PopulateAvailabilityZone; Poller tracks the long-running operation.
type PopulateAvailabilityZoneOperationResponse struct {
	Poller       pollers.Poller
	HttpResponse *http.Response
	OData        *odata.OData
	Model        *Volume
}

// PopulateAvailabilityZone ...
func (c VolumesClient) PopulateAvailabilityZone(ctx context.Context, id VolumeId) (result PopulateAvailabilityZoneOperationResponse, err error) {
	opts := client.RequestOptions{
		ContentType: "application/json; charset=utf-8",
		ExpectedStatusCodes: []int{
			http.StatusAccepted,
			http.StatusOK,
		},
		HttpMethod: http.MethodPost,
		Path:       fmt.Sprintf("%s/populateAvailabilityZone", id.ID()),
	}

	req, err := c.Client.NewRequest(ctx, opts)
	if err != nil {
		return
	}

	var resp *client.Response
	resp, err = req.Execute(ctx)
	// capture the response before the error check for caller inspection
	if resp != nil {
		result.OData = resp.OData
		result.HttpResponse = resp.Response
	}
	if err != nil {
		return
	}

	result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client)
	if err != nil {
		return
	}

	return
}

// PopulateAvailabilityZoneThenPoll performs PopulateAvailabilityZone then polls until it's completed
func (c VolumesClient) PopulateAvailabilityZoneThenPoll(ctx context.Context, id VolumeId) error {
	result, err := c.PopulateAvailabilityZone(ctx, id)
	if err != nil {
		return fmt.Errorf("performing PopulateAvailabilityZone: %+v", err)
	}

	if err := result.Poller.PollUntilDone(ctx); err != nil {
		return fmt.Errorf("polling after PopulateAvailabilityZone: %+v", err)
	}

	return nil
}
diff --git a/resource-manager/netapp/2025-06-01/volumes/method_update.go b/resource-manager/netapp/2025-06-01/volumes/method_update.go
new file mode 100644
index 00000000000..8a9aedb619c
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumes/method_update.go
@@ -0,0 +1,75 @@
package volumes

import (
	"context"
	"fmt"
	"net/http"

	"github.com/hashicorp/go-azure-sdk/sdk/client"
	"github.com/hashicorp/go-azure-sdk/sdk/client/pollers"
	"github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager"
	"github.com/hashicorp/go-azure-sdk/sdk/odata"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
See NOTICE.txt in the project root for license information. + +type UpdateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *Volume +} + +// Update ... +func (c VolumesClient) Update(ctx context.Context, id VolumeId, input VolumePatch) (result UpdateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPatch, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// UpdateThenPoll performs Update then polls until it's completed +func (c VolumesClient) UpdateThenPoll(ctx context.Context, id VolumeId, input VolumePatch) error { + result, err := c.Update(ctx, id, input) + if err != nil { + return fmt.Errorf("performing Update: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Update: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumes/model_destinationreplication.go b/resource-manager/netapp/2025-06-01/volumes/model_destinationreplication.go new file mode 100644 index 00000000000..da6e1da9489 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/model_destinationreplication.go @@ -0,0 +1,11 @@ +package volumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
// DestinationReplication is the element type of
// ReplicationObject.DestinationReplications.
type DestinationReplication struct {
	Region          *string          `json:"region,omitempty"`
	ReplicationType *ReplicationType `json:"replicationType,omitempty"`
	ResourceId      *string          `json:"resourceId,omitempty"`
	Zone            *string          `json:"zone,omitempty"`
}
diff --git a/resource-manager/netapp/2025-06-01/volumes/model_exportpolicyrule.go b/resource-manager/netapp/2025-06-01/volumes/model_exportpolicyrule.go
new file mode 100644
index 00000000000..9b059c74757
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumes/model_exportpolicyrule.go
@@ -0,0 +1,22 @@
package volumes

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ExportPolicyRule is a single rule within a Volume's export policy.
type ExportPolicyRule struct {
	AllowedClients      *string    `json:"allowedClients,omitempty"`
	ChownMode           *ChownMode `json:"chownMode,omitempty"`
	Cifs                *bool      `json:"cifs,omitempty"`
	HasRootAccess       *bool      `json:"hasRootAccess,omitempty"`
	Kerberos5ReadOnly   *bool      `json:"kerberos5ReadOnly,omitempty"`
	Kerberos5ReadWrite  *bool      `json:"kerberos5ReadWrite,omitempty"`
	Kerberos5iReadOnly  *bool      `json:"kerberos5iReadOnly,omitempty"`
	Kerberos5iReadWrite *bool      `json:"kerberos5iReadWrite,omitempty"`
	Kerberos5pReadOnly  *bool      `json:"kerberos5pReadOnly,omitempty"`
	Kerberos5pReadWrite *bool      `json:"kerberos5pReadWrite,omitempty"`
	Nfsv3               *bool      `json:"nfsv3,omitempty"`
	Nfsv41              *bool      `json:"nfsv41,omitempty"`
	RuleIndex           *int64     `json:"ruleIndex,omitempty"`
	UnixReadOnly        *bool      `json:"unixReadOnly,omitempty"`
	UnixReadWrite       *bool      `json:"unixReadWrite,omitempty"`
}
diff --git a/resource-manager/netapp/2025-06-01/volumes/model_mounttargetproperties.go b/resource-manager/netapp/2025-06-01/volumes/model_mounttargetproperties.go
new file mode 100644
index 00000000000..7f5c238183c
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumes/model_mounttargetproperties.go
@@ -0,0 +1,11 @@
package volumes

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// MountTargetProperties describes a mount target of a Volume; FileSystemId is
// the only required field.
type MountTargetProperties struct {
	FileSystemId  string  `json:"fileSystemId"`
	IPAddress     *string `json:"ipAddress,omitempty"`
	MountTargetId *string `json:"mountTargetId,omitempty"`
	SmbServerFqdn *string `json:"smbServerFqdn,omitempty"`
}
diff --git a/resource-manager/netapp/2025-06-01/volumes/model_placementkeyvaluepairs.go b/resource-manager/netapp/2025-06-01/volumes/model_placementkeyvaluepairs.go
new file mode 100644
index 00000000000..588e9729c1e
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumes/model_placementkeyvaluepairs.go
@@ -0,0 +1,9 @@
package volumes

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// PlacementKeyValuePairs is a required key/value pair used for volume
// placement.
type PlacementKeyValuePairs struct {
	Key   string `json:"key"`
	Value string `json:"value"`
}
diff --git a/resource-manager/netapp/2025-06-01/volumes/model_remotepath.go b/resource-manager/netapp/2025-06-01/volumes/model_remotepath.go
new file mode 100644
index 00000000000..b283879aae7
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumes/model_remotepath.go
@@ -0,0 +1,10 @@
package volumes

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// RemotePath identifies the remote volume of a replication; all fields are
// required.
type RemotePath struct {
	ExternalHostName string `json:"externalHostName"`
	ServerName       string `json:"serverName"`
	VolumeName       string `json:"volumeName"`
}
diff --git a/resource-manager/netapp/2025-06-01/volumes/model_replicationobject.go b/resource-manager/netapp/2025-06-01/volumes/model_replicationobject.go
new file mode 100644
index 00000000000..eda99d55c74
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumes/model_replicationobject.go
@@ -0,0 +1,14 @@
package volumes

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// ReplicationObject describes a Volume's replication configuration.
type ReplicationObject struct {
	DestinationReplications *[]DestinationReplication `json:"destinationReplications,omitempty"`
	EndpointType            *EndpointType             `json:"endpointType,omitempty"`
	RemotePath              *RemotePath               `json:"remotePath,omitempty"`
	RemoteVolumeRegion      *string                   `json:"remoteVolumeRegion,omitempty"`
	RemoteVolumeResourceId  *string                   `json:"remoteVolumeResourceId,omitempty"`
	ReplicationId           *string                   `json:"replicationId,omitempty"`
	ReplicationSchedule     *ReplicationSchedule      `json:"replicationSchedule,omitempty"`
}
diff --git a/resource-manager/netapp/2025-06-01/volumes/model_volume.go b/resource-manager/netapp/2025-06-01/volumes/model_volume.go
new file mode 100644
index 00000000000..1679818444b
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumes/model_volume.go
@@ -0,0 +1,21 @@
package volumes

import (
	"github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata"
	"github.com/hashicorp/go-azure-helpers/resourcemanager/zones"
)

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// Volume is a NetApp Volume resource; Location and Properties are required,
// everything else is optional in the wire format.
type Volume struct {
	Etag       *string                `json:"etag,omitempty"`
	Id         *string                `json:"id,omitempty"`
	Location   string                 `json:"location"`
	Name       *string                `json:"name,omitempty"`
	Properties VolumeProperties       `json:"properties"`
	SystemData *systemdata.SystemData `json:"systemData,omitempty"`
	Tags       *map[string]string     `json:"tags,omitempty"`
	Type       *string                `json:"type,omitempty"`
	Zones      *zones.Schema          `json:"zones,omitempty"`
}
diff --git a/resource-manager/netapp/2025-06-01/volumes/model_volumebackupproperties.go b/resource-manager/netapp/2025-06-01/volumes/model_volumebackupproperties.go
new file mode 100644
index 00000000000..a74e6fd4d16
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumes/model_volumebackupproperties.go
@@ -0,0 +1,10 @@
package volumes

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.

// VolumeBackupProperties describes a Volume's backup configuration.
type VolumeBackupProperties struct {
	BackupPolicyId *string `json:"backupPolicyId,omitempty"`
	BackupVaultId  *string `json:"backupVaultId,omitempty"`
	PolicyEnforced *bool   `json:"policyEnforced,omitempty"`
}
diff --git a/resource-manager/netapp/2025-06-01/volumes/model_volumepatch.go b/resource-manager/netapp/2025-06-01/volumes/model_volumepatch.go
new file mode 100644
index 00000000000..b39ea01aee8
--- /dev/null
+++ b/resource-manager/netapp/2025-06-01/volumes/model_volumepatch.go
@@ -0,0 +1,13 @@
package volumes

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See NOTICE.txt in the project root for license information.
+ +type VolumePatch struct { + Id *string `json:"id,omitempty"` + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *VolumePatchProperties `json:"properties,omitempty"` + Tags *map[string]string `json:"tags,omitempty"` + Type *string `json:"type,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumes/model_volumepatchproperties.go b/resource-manager/netapp/2025-06-01/volumes/model_volumepatchproperties.go new file mode 100644 index 00000000000..e95d33a193b --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/model_volumepatchproperties.go @@ -0,0 +1,24 @@ +package volumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumePatchProperties struct { + CoolAccess *bool `json:"coolAccess,omitempty"` + CoolAccessRetrievalPolicy *CoolAccessRetrievalPolicy `json:"coolAccessRetrievalPolicy,omitempty"` + CoolAccessTieringPolicy *CoolAccessTieringPolicy `json:"coolAccessTieringPolicy,omitempty"` + CoolnessPeriod *int64 `json:"coolnessPeriod,omitempty"` + DataProtection *VolumePatchPropertiesDataProtection `json:"dataProtection,omitempty"` + DefaultGroupQuotaInKiBs *int64 `json:"defaultGroupQuotaInKiBs,omitempty"` + DefaultUserQuotaInKiBs *int64 `json:"defaultUserQuotaInKiBs,omitempty"` + ExportPolicy *VolumePatchPropertiesExportPolicy `json:"exportPolicy,omitempty"` + IsDefaultQuotaEnabled *bool `json:"isDefaultQuotaEnabled,omitempty"` + ProtocolTypes *[]string `json:"protocolTypes,omitempty"` + ServiceLevel *ServiceLevel `json:"serviceLevel,omitempty"` + SmbAccessBasedEnumeration *SmbAccessBasedEnumeration `json:"smbAccessBasedEnumeration,omitempty"` + SmbNonBrowsable *SmbNonBrowsable `json:"smbNonBrowsable,omitempty"` + SnapshotDirectoryVisible *bool `json:"snapshotDirectoryVisible,omitempty"` + ThroughputMibps *float64 `json:"throughputMibps,omitempty"` + UnixPermissions *string 
`json:"unixPermissions,omitempty"` + UsageThreshold *int64 `json:"usageThreshold,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumes/model_volumepatchpropertiesdataprotection.go b/resource-manager/netapp/2025-06-01/volumes/model_volumepatchpropertiesdataprotection.go new file mode 100644 index 00000000000..8c9edc10ff7 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/model_volumepatchpropertiesdataprotection.go @@ -0,0 +1,9 @@ +package volumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumePatchPropertiesDataProtection struct { + Backup *VolumeBackupProperties `json:"backup,omitempty"` + Snapshot *VolumeSnapshotProperties `json:"snapshot,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumes/model_volumepatchpropertiesexportpolicy.go b/resource-manager/netapp/2025-06-01/volumes/model_volumepatchpropertiesexportpolicy.go new file mode 100644 index 00000000000..ae83a5fc1b4 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/model_volumepatchpropertiesexportpolicy.go @@ -0,0 +1,8 @@ +package volumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumePatchPropertiesExportPolicy struct { + Rules *[]ExportPolicyRule `json:"rules,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumes/model_volumeproperties.go b/resource-manager/netapp/2025-06-01/volumes/model_volumeproperties.go new file mode 100644 index 00000000000..77902379c5a --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/model_volumeproperties.go @@ -0,0 +1,65 @@ +package volumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeProperties struct { + AcceptGrowCapacityPoolForShortTermCloneSplit *AcceptGrowCapacityPoolForShortTermCloneSplit `json:"acceptGrowCapacityPoolForShortTermCloneSplit,omitempty"` + ActualThroughputMibps *float64 `json:"actualThroughputMibps,omitempty"` + AvsDataStore *AvsDataStore `json:"avsDataStore,omitempty"` + BackupId *string `json:"backupId,omitempty"` + BaremetalTenantId *string `json:"baremetalTenantId,omitempty"` + CapacityPoolResourceId *string `json:"capacityPoolResourceId,omitempty"` + CloneProgress *int64 `json:"cloneProgress,omitempty"` + CoolAccess *bool `json:"coolAccess,omitempty"` + CoolAccessRetrievalPolicy *CoolAccessRetrievalPolicy `json:"coolAccessRetrievalPolicy,omitempty"` + CoolAccessTieringPolicy *CoolAccessTieringPolicy `json:"coolAccessTieringPolicy,omitempty"` + CoolnessPeriod *int64 `json:"coolnessPeriod,omitempty"` + CreationToken string `json:"creationToken"` + DataProtection *VolumePropertiesDataProtection `json:"dataProtection,omitempty"` + DataStoreResourceId *[]string `json:"dataStoreResourceId,omitempty"` + DefaultGroupQuotaInKiBs *int64 `json:"defaultGroupQuotaInKiBs,omitempty"` + DefaultUserQuotaInKiBs *int64 `json:"defaultUserQuotaInKiBs,omitempty"` + DeleteBaseSnapshot *bool `json:"deleteBaseSnapshot,omitempty"` + EffectiveNetworkFeatures *NetworkFeatures `json:"effectiveNetworkFeatures,omitempty"` + EnableSubvolumes *EnableSubvolumes `json:"enableSubvolumes,omitempty"` + Encrypted *bool `json:"encrypted,omitempty"` + EncryptionKeySource *EncryptionKeySource `json:"encryptionKeySource,omitempty"` + ExportPolicy *VolumePropertiesExportPolicy `json:"exportPolicy,omitempty"` + FileAccessLogs *FileAccessLogs `json:"fileAccessLogs,omitempty"` + FileSystemId *string `json:"fileSystemId,omitempty"` + InheritedSizeInBytes *int64 `json:"inheritedSizeInBytes,omitempty"` + IsDefaultQuotaEnabled *bool `json:"isDefaultQuotaEnabled,omitempty"` + IsLargeVolume *bool `json:"isLargeVolume,omitempty"` + IsRestoring *bool 
`json:"isRestoring,omitempty"` + KerberosEnabled *bool `json:"kerberosEnabled,omitempty"` + KeyVaultPrivateEndpointResourceId *string `json:"keyVaultPrivateEndpointResourceId,omitempty"` + LdapEnabled *bool `json:"ldapEnabled,omitempty"` + MaximumNumberOfFiles *int64 `json:"maximumNumberOfFiles,omitempty"` + MountTargets *[]MountTargetProperties `json:"mountTargets,omitempty"` + NetworkFeatures *NetworkFeatures `json:"networkFeatures,omitempty"` + NetworkSiblingSetId *string `json:"networkSiblingSetId,omitempty"` + OriginatingResourceId *string `json:"originatingResourceId,omitempty"` + PlacementRules *[]PlacementKeyValuePairs `json:"placementRules,omitempty"` + ProtocolTypes *[]string `json:"protocolTypes,omitempty"` + ProvisionedAvailabilityZone *string `json:"provisionedAvailabilityZone,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + ProximityPlacementGroup *string `json:"proximityPlacementGroup,omitempty"` + SecurityStyle *SecurityStyle `json:"securityStyle,omitempty"` + ServiceLevel *ServiceLevel `json:"serviceLevel,omitempty"` + SmbAccessBasedEnumeration *SmbAccessBasedEnumeration `json:"smbAccessBasedEnumeration,omitempty"` + SmbContinuouslyAvailable *bool `json:"smbContinuouslyAvailable,omitempty"` + SmbEncryption *bool `json:"smbEncryption,omitempty"` + SmbNonBrowsable *SmbNonBrowsable `json:"smbNonBrowsable,omitempty"` + SnapshotDirectoryVisible *bool `json:"snapshotDirectoryVisible,omitempty"` + SnapshotId *string `json:"snapshotId,omitempty"` + StorageToNetworkProximity *VolumeStorageToNetworkProximity `json:"storageToNetworkProximity,omitempty"` + SubnetId string `json:"subnetId"` + T2Network *string `json:"t2Network,omitempty"` + ThroughputMibps *float64 `json:"throughputMibps,omitempty"` + UnixPermissions *string `json:"unixPermissions,omitempty"` + UsageThreshold int64 `json:"usageThreshold"` + VolumeGroupName *string `json:"volumeGroupName,omitempty"` + VolumeSpecName *string `json:"volumeSpecName,omitempty"` + 
VolumeType *string `json:"volumeType,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumes/model_volumepropertiesdataprotection.go b/resource-manager/netapp/2025-06-01/volumes/model_volumepropertiesdataprotection.go new file mode 100644 index 00000000000..fcf952952d2 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/model_volumepropertiesdataprotection.go @@ -0,0 +1,11 @@ +package volumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumePropertiesDataProtection struct { + Backup *VolumeBackupProperties `json:"backup,omitempty"` + Replication *ReplicationObject `json:"replication,omitempty"` + Snapshot *VolumeSnapshotProperties `json:"snapshot,omitempty"` + VolumeRelocation *VolumeRelocationProperties `json:"volumeRelocation,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumes/model_volumepropertiesexportpolicy.go b/resource-manager/netapp/2025-06-01/volumes/model_volumepropertiesexportpolicy.go new file mode 100644 index 00000000000..fda5f4739de --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/model_volumepropertiesexportpolicy.go @@ -0,0 +1,8 @@ +package volumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumePropertiesExportPolicy struct { + Rules *[]ExportPolicyRule `json:"rules,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumes/model_volumerelocationproperties.go b/resource-manager/netapp/2025-06-01/volumes/model_volumerelocationproperties.go new file mode 100644 index 00000000000..23274832779 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/model_volumerelocationproperties.go @@ -0,0 +1,9 @@ +package volumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type VolumeRelocationProperties struct { + ReadyToBeFinalized *bool `json:"readyToBeFinalized,omitempty"` + RelocationRequested *bool `json:"relocationRequested,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumes/model_volumesnapshotproperties.go b/resource-manager/netapp/2025-06-01/volumes/model_volumesnapshotproperties.go new file mode 100644 index 00000000000..48aba2acce0 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/model_volumesnapshotproperties.go @@ -0,0 +1,8 @@ +package volumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumeSnapshotProperties struct { + SnapshotPolicyId *string `json:"snapshotPolicyId,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumes/predicates.go b/resource-manager/netapp/2025-06-01/volumes/predicates.go new file mode 100644 index 00000000000..fcc54ae5e26 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/predicates.go @@ -0,0 +1,37 @@ +package volumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumeOperationPredicate struct { + Etag *string + Id *string + Location *string + Name *string + Type *string +} + +func (p VolumeOperationPredicate) Matches(input Volume) bool { + + if p.Etag != nil && (input.Etag == nil || *p.Etag != *input.Etag) { + return false + } + + if p.Id != nil && (input.Id == nil || *p.Id != *input.Id) { + return false + } + + if p.Location != nil && *p.Location != input.Location { + return false + } + + if p.Name != nil && (input.Name == nil || *p.Name != *input.Name) { + return false + } + + if p.Type != nil && (input.Type == nil || *p.Type != *input.Type) { + return false + } + + return true +} diff --git a/resource-manager/netapp/2025-06-01/volumes/version.go b/resource-manager/netapp/2025-06-01/volumes/version.go new file mode 100644 index 00000000000..ff2871f5199 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumes/version.go @@ -0,0 +1,10 @@ +package volumes + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/volumes/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigration/README.md b/resource-manager/netapp/2025-06-01/volumesonpremmigration/README.md new file mode 100644 index 00000000000..13412611320 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigration/README.md @@ -0,0 +1,61 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumesonpremmigration` Documentation + +The `volumesonpremmigration` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumesonpremmigration" +``` + + +### Client Initialization + +```go +client := volumesonpremmigration.NewVolumesOnPremMigrationClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `VolumesOnPremMigrationClient.VolumesAuthorizeExternalReplication` + +```go +ctx := context.TODO() +id := volumesonpremmigration.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +if err := client.VolumesAuthorizeExternalReplicationThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumesOnPremMigrationClient.VolumesPeerExternalCluster` + +```go +ctx := context.TODO() +id := volumesonpremmigration.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +payload := volumesonpremmigration.PeerClusterForVolumeMigrationRequest{ + // ... 
+} + + +if err := client.VolumesPeerExternalClusterThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumesOnPremMigrationClient.VolumesPerformReplicationTransfer` + +```go +ctx := context.TODO() +id := volumesonpremmigration.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +if err := client.VolumesPerformReplicationTransferThenPoll(ctx, id); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigration/client.go b/resource-manager/netapp/2025-06-01/volumesonpremmigration/client.go new file mode 100644 index 00000000000..b858bb952cd --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigration/client.go @@ -0,0 +1,26 @@ +package volumesonpremmigration + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumesOnPremMigrationClient struct { + Client *resourcemanager.Client +} + +func NewVolumesOnPremMigrationClientWithBaseURI(sdkApi sdkEnv.Api) (*VolumesOnPremMigrationClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "volumesonpremmigration", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating VolumesOnPremMigrationClient: %+v", err) + } + + return &VolumesOnPremMigrationClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigration/id_volume.go b/resource-manager/netapp/2025-06-01/volumesonpremmigration/id_volume.go new file mode 100644 index 00000000000..6dcc938a6b7 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigration/id_volume.go @@ -0,0 +1,148 @@ +package volumesonpremmigration + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + 
return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + 
resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigration/id_volume_test.go b/resource-manager/netapp/2025-06-01/volumesonpremmigration/id_volume_test.go new file mode 100644 index 00000000000..7222936b0d8 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigration/id_volume_test.go @@ -0,0 +1,372 @@ +package volumesonpremmigration + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigration/method_volumesauthorizeexternalreplication.go b/resource-manager/netapp/2025-06-01/volumesonpremmigration/method_volumesauthorizeexternalreplication.go new file mode 100644 index 00000000000..45c0de7126f --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigration/method_volumesauthorizeexternalreplication.go @@ -0,0 +1,71 @@ +package volumesonpremmigration + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesAuthorizeExternalReplicationOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *SVMPeerCommandResponse +} + +// VolumesAuthorizeExternalReplication ... +func (c VolumesOnPremMigrationClient) VolumesAuthorizeExternalReplication(ctx context.Context, id VolumeId) (result VolumesAuthorizeExternalReplicationOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/authorizeExternalReplication", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesAuthorizeExternalReplicationThenPoll performs VolumesAuthorizeExternalReplication then polls until it's completed +func (c VolumesOnPremMigrationClient) VolumesAuthorizeExternalReplicationThenPoll(ctx context.Context, id VolumeId) error { + result, err := c.VolumesAuthorizeExternalReplication(ctx, id) + if err != nil { + return fmt.Errorf("performing VolumesAuthorizeExternalReplication: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesAuthorizeExternalReplication: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigration/method_volumespeerexternalcluster.go b/resource-manager/netapp/2025-06-01/volumesonpremmigration/method_volumespeerexternalcluster.go new file mode 100644 index 
00000000000..e92a82727bf --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigration/method_volumespeerexternalcluster.go @@ -0,0 +1,75 @@ +package volumesonpremmigration + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesPeerExternalClusterOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData + Model *ClusterPeerCommandResponse +} + +// VolumesPeerExternalCluster ... +func (c VolumesOnPremMigrationClient) VolumesPeerExternalCluster(ctx context.Context, id VolumeId, input PeerClusterForVolumeMigrationRequest) (result VolumesPeerExternalClusterOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/peerExternalCluster", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesPeerExternalClusterThenPoll performs VolumesPeerExternalCluster then polls until it's completed +func (c VolumesOnPremMigrationClient) VolumesPeerExternalClusterThenPoll(ctx context.Context, id VolumeId, input PeerClusterForVolumeMigrationRequest) error { + 
result, err := c.VolumesPeerExternalCluster(ctx, id, input) + if err != nil { + return fmt.Errorf("performing VolumesPeerExternalCluster: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesPeerExternalCluster: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigration/method_volumesperformreplicationtransfer.go b/resource-manager/netapp/2025-06-01/volumesonpremmigration/method_volumesperformreplicationtransfer.go new file mode 100644 index 00000000000..0ba727cd4ac --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigration/method_volumesperformreplicationtransfer.go @@ -0,0 +1,69 @@ +package volumesonpremmigration + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesPerformReplicationTransferOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesPerformReplicationTransfer ... 
+func (c VolumesOnPremMigrationClient) VolumesPerformReplicationTransfer(ctx context.Context, id VolumeId) (result VolumesPerformReplicationTransferOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/performReplicationTransfer", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesPerformReplicationTransferThenPoll performs VolumesPerformReplicationTransfer then polls until it's completed +func (c VolumesOnPremMigrationClient) VolumesPerformReplicationTransferThenPoll(ctx context.Context, id VolumeId) error { + result, err := c.VolumesPerformReplicationTransfer(ctx, id) + if err != nil { + return fmt.Errorf("performing VolumesPerformReplicationTransfer: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesPerformReplicationTransfer: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigration/model_clusterpeercommandresponse.go b/resource-manager/netapp/2025-06-01/volumesonpremmigration/model_clusterpeercommandresponse.go new file mode 100644 index 00000000000..2f146170417 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigration/model_clusterpeercommandresponse.go @@ -0,0 +1,8 @@ +package volumesonpremmigration + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ClusterPeerCommandResponse struct { + PeerAcceptCommand *string `json:"peerAcceptCommand,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigration/model_peerclusterforvolumemigrationrequest.go b/resource-manager/netapp/2025-06-01/volumesonpremmigration/model_peerclusterforvolumemigrationrequest.go new file mode 100644 index 00000000000..fe63a83424c --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigration/model_peerclusterforvolumemigrationrequest.go @@ -0,0 +1,8 @@ +package volumesonpremmigration + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type PeerClusterForVolumeMigrationRequest struct { + PeerIPAddresses []string `json:"peerIpAddresses"` +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigration/model_svmpeercommandresponse.go b/resource-manager/netapp/2025-06-01/volumesonpremmigration/model_svmpeercommandresponse.go new file mode 100644 index 00000000000..1578dcb9dcc --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigration/model_svmpeercommandresponse.go @@ -0,0 +1,8 @@ +package volumesonpremmigration + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type SVMPeerCommandResponse struct { + SVMPeeringCommand *string `json:"svmPeeringCommand,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigration/version.go b/resource-manager/netapp/2025-06-01/volumesonpremmigration/version.go new file mode 100644 index 00000000000..684953d0fea --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigration/version.go @@ -0,0 +1,10 @@ +package volumesonpremmigration + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/volumesonpremmigration/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/client.go b/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/client.go new file mode 100644 index 00000000000..2b54b0579fc --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/client.go @@ -0,0 +1,26 @@ +package volumesonpremmigrationfinalize + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesOnPremMigrationFinalizeClient struct { + Client *resourcemanager.Client +} + +func NewVolumesOnPremMigrationFinalizeClientWithBaseURI(sdkApi sdkEnv.Api) (*VolumesOnPremMigrationFinalizeClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "volumesonpremmigrationfinalize", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating VolumesOnPremMigrationFinalizeClient: %+v", err) + } + + return &VolumesOnPremMigrationFinalizeClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/id_volume.go b/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/id_volume.go new file mode 100644 index 00000000000..1edbc85826f --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/id_volume.go @@ -0,0 +1,148 @@ +package volumesonpremmigrationfinalize + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return 
resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", 
"providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/id_volume_test.go b/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/id_volume_test.go new file mode 100644 index 00000000000..a0362282f55 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/id_volume_test.go @@ -0,0 +1,372 @@ +package volumesonpremmigrationfinalize + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/method_volumesfinalizeexternalreplication.go b/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/method_volumesfinalizeexternalreplication.go new file mode 100644 index 00000000000..9f8b478e476 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/method_volumesfinalizeexternalreplication.go @@ -0,0 +1,69 @@ +package volumesonpremmigrationfinalize + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// 
Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesFinalizeExternalReplicationOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesFinalizeExternalReplication ... +func (c VolumesOnPremMigrationFinalizeClient) VolumesFinalizeExternalReplication(ctx context.Context, id VolumeId) (result VolumesFinalizeExternalReplicationOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/finalizeExternalReplication", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesFinalizeExternalReplicationThenPoll performs VolumesFinalizeExternalReplication then polls until it's completed +func (c VolumesOnPremMigrationFinalizeClient) VolumesFinalizeExternalReplicationThenPoll(ctx context.Context, id VolumeId) error { + result, err := c.VolumesFinalizeExternalReplication(ctx, id) + if err != nil { + return fmt.Errorf("performing VolumesFinalizeExternalReplication: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesFinalizeExternalReplication: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/version.go b/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/version.go new file mode 100644 index 00000000000..f45a39e0a2d --- /dev/null +++ 
b/resource-manager/netapp/2025-06-01/volumesonpremmigrationfinalize/version.go @@ -0,0 +1,10 @@ +package volumesonpremmigrationfinalize + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/volumesonpremmigrationfinalize/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/volumesrelocation/README.md b/resource-manager/netapp/2025-06-01/volumesrelocation/README.md new file mode 100644 index 00000000000..d226d86a44a --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrelocation/README.md @@ -0,0 +1,61 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumesrelocation` Documentation + +The `volumesrelocation` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumesrelocation" +``` + + +### Client Initialization + +```go +client := volumesrelocation.NewVolumesRelocationClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `VolumesRelocationClient.VolumesFinalizeRelocation` + +```go +ctx := context.TODO() +id := volumesrelocation.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +if err := client.VolumesFinalizeRelocationThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumesRelocationClient.VolumesRelocate` + +```go +ctx := context.TODO() +id := volumesrelocation.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +payload := volumesrelocation.RelocateVolumeRequest{ + // ... 
+} + + +if err := client.VolumesRelocateThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumesRelocationClient.VolumesRevertRelocation` + +```go +ctx := context.TODO() +id := volumesrelocation.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +if err := client.VolumesRevertRelocationThenPoll(ctx, id); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/volumesrelocation/client.go b/resource-manager/netapp/2025-06-01/volumesrelocation/client.go new file mode 100644 index 00000000000..468673aa176 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrelocation/client.go @@ -0,0 +1,26 @@ +package volumesrelocation + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumesRelocationClient struct { + Client *resourcemanager.Client +} + +func NewVolumesRelocationClientWithBaseURI(sdkApi sdkEnv.Api) (*VolumesRelocationClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "volumesrelocation", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating VolumesRelocationClient: %+v", err) + } + + return &VolumesRelocationClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesrelocation/id_volume.go b/resource-manager/netapp/2025-06-01/volumesrelocation/id_volume.go new file mode 100644 index 00000000000..113162da6d7 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrelocation/id_volume.go @@ -0,0 +1,148 @@ +package volumesrelocation + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + 
return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + 
resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/volumesrelocation/id_volume_test.go b/resource-manager/netapp/2025-06-01/volumesrelocation/id_volume_test.go new file mode 100644 index 00000000000..6a87e3c4d06 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrelocation/id_volume_test.go @@ -0,0 +1,372 @@ +package volumesrelocation + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %d unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/volumesrelocation/method_volumesfinalizerelocation.go b/resource-manager/netapp/2025-06-01/volumesrelocation/method_volumesfinalizerelocation.go new file mode 100644 index 00000000000..8bc1cc1b39a --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrelocation/method_volumesfinalizerelocation.go @@ -0,0 +1,70 @@ +package volumesrelocation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesFinalizeRelocationOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesFinalizeRelocation ... +func (c VolumesRelocationClient) VolumesFinalizeRelocation(ctx context.Context, id VolumeId) (result VolumesFinalizeRelocationOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/finalizeRelocation", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesFinalizeRelocationThenPoll performs VolumesFinalizeRelocation then polls until it's completed +func (c VolumesRelocationClient) VolumesFinalizeRelocationThenPoll(ctx context.Context, id VolumeId) error { + result, err := c.VolumesFinalizeRelocation(ctx, id) + if err != nil { + return fmt.Errorf("performing VolumesFinalizeRelocation: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesFinalizeRelocation: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesrelocation/method_volumesrelocate.go b/resource-manager/netapp/2025-06-01/volumesrelocation/method_volumesrelocate.go new file mode 100644 index 00000000000..1207d489ed4 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrelocation/method_volumesrelocate.go @@ -0,0 +1,74 @@ +package volumesrelocation + +import ( + "context" + "fmt" + "net/http" + + 
"github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesRelocateOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesRelocate ... +func (c VolumesRelocationClient) VolumesRelocate(ctx context.Context, id VolumeId, input RelocateVolumeRequest) (result VolumesRelocateOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/relocate", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesRelocateThenPoll performs VolumesRelocate then polls until it's completed +func (c VolumesRelocationClient) VolumesRelocateThenPoll(ctx context.Context, id VolumeId, input RelocateVolumeRequest) error { + result, err := c.VolumesRelocate(ctx, id, input) + if err != nil { + return fmt.Errorf("performing VolumesRelocate: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesRelocate: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesrelocation/method_volumesrevertrelocation.go 
b/resource-manager/netapp/2025-06-01/volumesrelocation/method_volumesrevertrelocation.go new file mode 100644 index 00000000000..0091b1304fd --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrelocation/method_volumesrevertrelocation.go @@ -0,0 +1,70 @@ +package volumesrelocation + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesRevertRelocationOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesRevertRelocation ... +func (c VolumesRelocationClient) VolumesRevertRelocation(ctx context.Context, id VolumeId) (result VolumesRevertRelocationOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/revertRelocation", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesRevertRelocationThenPoll performs VolumesRevertRelocation then polls until it's completed +func (c VolumesRelocationClient) VolumesRevertRelocationThenPoll(ctx context.Context, id VolumeId) error { + result, err := c.VolumesRevertRelocation(ctx, id) + if err != nil { + return fmt.Errorf("performing 
VolumesRevertRelocation: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesRevertRelocation: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesrelocation/model_relocatevolumerequest.go b/resource-manager/netapp/2025-06-01/volumesrelocation/model_relocatevolumerequest.go new file mode 100644 index 00000000000..25cb3a56801 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrelocation/model_relocatevolumerequest.go @@ -0,0 +1,8 @@ +package volumesrelocation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type RelocateVolumeRequest struct { + CreationToken *string `json:"creationToken,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumesrelocation/version.go b/resource-manager/netapp/2025-06-01/volumesrelocation/version.go new file mode 100644 index 00000000000..3da8b297a52 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrelocation/version.go @@ -0,0 +1,10 @@ +package volumesrelocation + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/volumesrelocation/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/README.md b/resource-manager/netapp/2025-06-01/volumesreplication/README.md new file mode 100644 index 00000000000..997074ff01d --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/README.md @@ -0,0 +1,139 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumesreplication` Documentation + +The `volumesreplication` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). 
+ +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumesreplication" +``` + + +### Client Initialization + +```go +client := volumesreplication.NewVolumesReplicationClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `VolumesReplicationClient.VolumesAuthorizeReplication` + +```go +ctx := context.TODO() +id := volumesreplication.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +payload := volumesreplication.AuthorizeRequest{ + // ... +} + + +if err := client.VolumesAuthorizeReplicationThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumesReplicationClient.VolumesBreakReplication` + +```go +ctx := context.TODO() +id := volumesreplication.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +payload := volumesreplication.BreakReplicationRequest{ + // ... 
+} + + +if err := client.VolumesBreakReplicationThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumesReplicationClient.VolumesDeleteReplication` + +```go +ctx := context.TODO() +id := volumesreplication.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +if err := client.VolumesDeleteReplicationThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumesReplicationClient.VolumesListReplications` + +```go +ctx := context.TODO() +id := volumesreplication.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +read, err := client.VolumesListReplications(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `VolumesReplicationClient.VolumesReInitializeReplication` + +```go +ctx := context.TODO() +id := volumesreplication.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +if err := client.VolumesReInitializeReplicationThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumesReplicationClient.VolumesReestablishReplication` + +```go +ctx := context.TODO() +id := volumesreplication.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +payload := volumesreplication.ReestablishReplicationRequest{ + // ... 
+} + + +if err := client.VolumesReestablishReplicationThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` + + +### Example Usage: `VolumesReplicationClient.VolumesReplicationStatus` + +```go +ctx := context.TODO() +id := volumesreplication.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +read, err := client.VolumesReplicationStatus(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `VolumesReplicationClient.VolumesResyncReplication` + +```go +ctx := context.TODO() +id := volumesreplication.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +if err := client.VolumesResyncReplicationThenPoll(ctx, id); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/client.go b/resource-manager/netapp/2025-06-01/volumesreplication/client.go new file mode 100644 index 00000000000..092e04c0a0d --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/client.go @@ -0,0 +1,26 @@ +package volumesreplication + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumesReplicationClient struct { + Client *resourcemanager.Client +} + +func NewVolumesReplicationClientWithBaseURI(sdkApi sdkEnv.Api) (*VolumesReplicationClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "volumesreplication", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating VolumesReplicationClient: %+v", err) + } + + return &VolumesReplicationClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/constants.go b/resource-manager/netapp/2025-06-01/volumesreplication/constants.go new file mode 100644 index 00000000000..2e5196391b7 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/constants.go @@ -0,0 +1,180 @@ +package volumesreplication + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type EndpointType string + +const ( + EndpointTypeDst EndpointType = "dst" + EndpointTypeSrc EndpointType = "src" +) + +func PossibleValuesForEndpointType() []string { + return []string{ + string(EndpointTypeDst), + string(EndpointTypeSrc), + } +} + +func (s *EndpointType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseEndpointType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseEndpointType(input string) (*EndpointType, error) { + vals := map[string]EndpointType{ + "dst": EndpointTypeDst, + "src": EndpointTypeSrc, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := EndpointType(input) + return &out, nil +} + +type MirrorState string + +const ( + MirrorStateBroken MirrorState = "Broken" 
+ MirrorStateMirrored MirrorState = "Mirrored" + MirrorStateUninitialized MirrorState = "Uninitialized" +) + +func PossibleValuesForMirrorState() []string { + return []string{ + string(MirrorStateBroken), + string(MirrorStateMirrored), + string(MirrorStateUninitialized), + } +} + +func (s *MirrorState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMirrorState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMirrorState(input string) (*MirrorState, error) { + vals := map[string]MirrorState{ + "broken": MirrorStateBroken, + "mirrored": MirrorStateMirrored, + "uninitialized": MirrorStateUninitialized, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MirrorState(input) + return &out, nil +} + +type RelationshipStatus string + +const ( + RelationshipStatusIdle RelationshipStatus = "Idle" + RelationshipStatusTransferring RelationshipStatus = "Transferring" +) + +func PossibleValuesForRelationshipStatus() []string { + return []string{ + string(RelationshipStatusIdle), + string(RelationshipStatusTransferring), + } +} + +func (s *RelationshipStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseRelationshipStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseRelationshipStatus(input string) (*RelationshipStatus, error) { + vals := map[string]RelationshipStatus{ + "idle": RelationshipStatusIdle, + "transferring": RelationshipStatusTransferring, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an 
undefined value and best-effort it + out := RelationshipStatus(input) + return &out, nil +} + +type ReplicationSchedule string + +const ( + ReplicationScheduleDaily ReplicationSchedule = "daily" + ReplicationScheduleHourly ReplicationSchedule = "hourly" + ReplicationScheduleOneZerominutely ReplicationSchedule = "_10minutely" +) + +func PossibleValuesForReplicationSchedule() []string { + return []string{ + string(ReplicationScheduleDaily), + string(ReplicationScheduleHourly), + string(ReplicationScheduleOneZerominutely), + } +} + +func (s *ReplicationSchedule) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseReplicationSchedule(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseReplicationSchedule(input string) (*ReplicationSchedule, error) { + vals := map[string]ReplicationSchedule{ + "daily": ReplicationScheduleDaily, + "hourly": ReplicationScheduleHourly, + "_10minutely": ReplicationScheduleOneZerominutely, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ReplicationSchedule(input) + return &out, nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/id_volume.go b/resource-manager/netapp/2025-06-01/volumesreplication/id_volume.go new file mode 100644 index 00000000000..f4997248ba1 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/id_volume.go @@ -0,0 +1,148 @@ +package volumesreplication + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + 
return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + 
resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/id_volume_test.go b/resource-manager/netapp/2025-06-01/volumesreplication/id_volume_test.go new file mode 100644 index 00000000000..51db68a7df6 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/id_volume_test.go @@ -0,0 +1,372 @@ +package volumesreplication + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesauthorizereplication.go b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesauthorizereplication.go new file mode 100644 index 00000000000..591d7c626ab --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesauthorizereplication.go @@ -0,0 +1,74 @@ +package volumesreplication + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesAuthorizeReplicationOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesAuthorizeReplication ... +func (c VolumesReplicationClient) VolumesAuthorizeReplication(ctx context.Context, id VolumeId, input AuthorizeRequest) (result VolumesAuthorizeReplicationOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/authorizeReplication", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesAuthorizeReplicationThenPoll performs VolumesAuthorizeReplication then polls until it's completed +func (c VolumesReplicationClient) VolumesAuthorizeReplicationThenPoll(ctx context.Context, id VolumeId, input AuthorizeRequest) error { + result, err := c.VolumesAuthorizeReplication(ctx, id, input) + if err != nil { + return fmt.Errorf("performing VolumesAuthorizeReplication: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesAuthorizeReplication: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesbreakreplication.go b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesbreakreplication.go new file mode 100644 index 00000000000..280008e8175 --- /dev/null +++ 
b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesbreakreplication.go @@ -0,0 +1,74 @@ +package volumesreplication + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesBreakReplicationOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesBreakReplication ... +func (c VolumesReplicationClient) VolumesBreakReplication(ctx context.Context, id VolumeId, input BreakReplicationRequest) (result VolumesBreakReplicationOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/breakReplication", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesBreakReplicationThenPoll performs VolumesBreakReplication then polls until it's completed +func (c VolumesReplicationClient) VolumesBreakReplicationThenPoll(ctx context.Context, id VolumeId, input BreakReplicationRequest) error { + result, err := c.VolumesBreakReplication(ctx, id, input) + if err != nil { + return fmt.Errorf("performing VolumesBreakReplication: %+v", err) + } + 
+ if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesBreakReplication: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesdeletereplication.go b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesdeletereplication.go new file mode 100644 index 00000000000..cda9737c512 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesdeletereplication.go @@ -0,0 +1,70 @@ +package volumesreplication + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesDeleteReplicationOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesDeleteReplication ... 
+func (c VolumesReplicationClient) VolumesDeleteReplication(ctx context.Context, id VolumeId) (result VolumesDeleteReplicationOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/deleteReplication", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesDeleteReplicationThenPoll performs VolumesDeleteReplication then polls until it's completed +func (c VolumesReplicationClient) VolumesDeleteReplicationThenPoll(ctx context.Context, id VolumeId) error { + result, err := c.VolumesDeleteReplication(ctx, id) + if err != nil { + return fmt.Errorf("performing VolumesDeleteReplication: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesDeleteReplication: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/method_volumeslistreplications.go b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumeslistreplications.go new file mode 100644 index 00000000000..bed3dfeb672 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumeslistreplications.go @@ -0,0 +1,54 @@ +package volumesreplication + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type VolumesListReplicationsOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ListReplications +} + +// VolumesListReplications ... +func (c VolumesReplicationClient) VolumesListReplications(ctx context.Context, id VolumeId) (result VolumesListReplicationsOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/listReplications", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ListReplications + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesreestablishreplication.go b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesreestablishreplication.go new file mode 100644 index 00000000000..047fc04a7f9 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesreestablishreplication.go @@ -0,0 +1,73 @@ +package volumesreplication + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type VolumesReestablishReplicationOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesReestablishReplication ... +func (c VolumesReplicationClient) VolumesReestablishReplication(ctx context.Context, id VolumeId, input ReestablishReplicationRequest) (result VolumesReestablishReplicationOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/reestablishReplication", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesReestablishReplicationThenPoll performs VolumesReestablishReplication then polls until it's completed +func (c VolumesReplicationClient) VolumesReestablishReplicationThenPoll(ctx context.Context, id VolumeId, input ReestablishReplicationRequest) error { + result, err := c.VolumesReestablishReplication(ctx, id, input) + if err != nil { + return fmt.Errorf("performing VolumesReestablishReplication: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesReestablishReplication: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesreinitializereplication.go b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesreinitializereplication.go new file mode 100644 index 00000000000..3f479bf637a --- /dev/null +++ 
b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesreinitializereplication.go @@ -0,0 +1,70 @@ +package volumesreplication + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesReInitializeReplicationOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesReInitializeReplication ... +func (c VolumesReplicationClient) VolumesReInitializeReplication(ctx context.Context, id VolumeId) (result VolumesReInitializeReplicationOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/reinitializeReplication", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesReInitializeReplicationThenPoll performs VolumesReInitializeReplication then polls until it's completed +func (c VolumesReplicationClient) VolumesReInitializeReplicationThenPoll(ctx context.Context, id VolumeId) error { + result, err := c.VolumesReInitializeReplication(ctx, id) + if err != nil { + return fmt.Errorf("performing VolumesReInitializeReplication: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err 
!= nil { + return fmt.Errorf("polling after VolumesReInitializeReplication: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesreplicationstatus.go b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesreplicationstatus.go new file mode 100644 index 00000000000..2f626c60847 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesreplicationstatus.go @@ -0,0 +1,54 @@ +package volumesreplication + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesReplicationStatusOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *ReplicationStatus +} + +// VolumesReplicationStatus ... +func (c VolumesReplicationClient) VolumesReplicationStatus(ctx context.Context, id VolumeId) (result VolumesReplicationStatusOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Path: fmt.Sprintf("%s/replicationStatus", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var model ReplicationStatus + result.Model = &model + if err = resp.Unmarshal(result.Model); err != nil { + return + } + + return +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesresyncreplication.go b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesresyncreplication.go new file mode 100644 index 
00000000000..23e1a66f518 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/method_volumesresyncreplication.go @@ -0,0 +1,70 @@ +package volumesreplication + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesResyncReplicationOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesResyncReplication ... +func (c VolumesReplicationClient) VolumesResyncReplication(ctx context.Context, id VolumeId) (result VolumesResyncReplicationOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/resyncReplication", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesResyncReplicationThenPoll performs VolumesResyncReplication then polls until it's completed +func (c VolumesReplicationClient) VolumesResyncReplicationThenPoll(ctx context.Context, id VolumeId) error { + result, err := c.VolumesResyncReplication(ctx, id) + if err != nil { + return fmt.Errorf("performing VolumesResyncReplication: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return 
fmt.Errorf("polling after VolumesResyncReplication: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/model_authorizerequest.go b/resource-manager/netapp/2025-06-01/volumesreplication/model_authorizerequest.go new file mode 100644 index 00000000000..ff1cefa31ef --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/model_authorizerequest.go @@ -0,0 +1,8 @@ +package volumesreplication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type AuthorizeRequest struct { + RemoteVolumeResourceId *string `json:"remoteVolumeResourceId,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/model_breakreplicationrequest.go b/resource-manager/netapp/2025-06-01/volumesreplication/model_breakreplicationrequest.go new file mode 100644 index 00000000000..b6a0a179a9c --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/model_breakreplicationrequest.go @@ -0,0 +1,8 @@ +package volumesreplication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type BreakReplicationRequest struct { + ForceBreakReplication *bool `json:"forceBreakReplication,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/model_listreplications.go b/resource-manager/netapp/2025-06-01/volumesreplication/model_listreplications.go new file mode 100644 index 00000000000..7929ea4ace4 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/model_listreplications.go @@ -0,0 +1,8 @@ +package volumesreplication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type ListReplications struct { + Value *[]Replication `json:"value,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/model_reestablishreplicationrequest.go b/resource-manager/netapp/2025-06-01/volumesreplication/model_reestablishreplicationrequest.go new file mode 100644 index 00000000000..193b71f1490 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/model_reestablishreplicationrequest.go @@ -0,0 +1,8 @@ +package volumesreplication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ReestablishReplicationRequest struct { + SourceVolumeId *string `json:"sourceVolumeId,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/model_replication.go b/resource-manager/netapp/2025-06-01/volumesreplication/model_replication.go new file mode 100644 index 00000000000..ff1273507a2 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/model_replication.go @@ -0,0 +1,12 @@ +package volumesreplication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type Replication struct { + EndpointType *EndpointType `json:"endpointType,omitempty"` + RemoteVolumeRegion *string `json:"remoteVolumeRegion,omitempty"` + RemoteVolumeResourceId string `json:"remoteVolumeResourceId"` + ReplicationId *string `json:"replicationId,omitempty"` + ReplicationSchedule *ReplicationSchedule `json:"replicationSchedule,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/model_replicationstatus.go b/resource-manager/netapp/2025-06-01/volumesreplication/model_replicationstatus.go new file mode 100644 index 00000000000..2a79dddf7a4 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/model_replicationstatus.go @@ -0,0 +1,12 @@ +package volumesreplication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ReplicationStatus struct { + ErrorMessage *string `json:"errorMessage,omitempty"` + Healthy *bool `json:"healthy,omitempty"` + MirrorState *MirrorState `json:"mirrorState,omitempty"` + RelationshipStatus *RelationshipStatus `json:"relationshipStatus,omitempty"` + TotalProgress *string `json:"totalProgress,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumesreplication/version.go b/resource-manager/netapp/2025-06-01/volumesreplication/version.go new file mode 100644 index 00000000000..4de12f24e61 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesreplication/version.go @@ -0,0 +1,10 @@ +package volumesreplication + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/volumesreplication/2025-06-01" +} diff --git a/resource-manager/netapp/2025-06-01/volumesrevert/README.md b/resource-manager/netapp/2025-06-01/volumesrevert/README.md new file mode 100644 index 00000000000..17440cac664 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrevert/README.md @@ -0,0 +1,37 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumesrevert` Documentation + +The `volumesrevert` SDK allows for interaction with Azure Resource Manager `netapp` (API Version `2025-06-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). + +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/netapp/2025-06-01/volumesrevert" +``` + + +### Client Initialization + +```go +client := volumesrevert.NewVolumesRevertClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `VolumesRevertClient.VolumesRevert` + +```go +ctx := context.TODO() +id := volumesrevert.NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + +payload := volumesrevert.VolumeRevert{ + // ... 
+} + + +if err := client.VolumesRevertThenPoll(ctx, id, payload); err != nil { + // handle the error +} +``` diff --git a/resource-manager/netapp/2025-06-01/volumesrevert/client.go b/resource-manager/netapp/2025-06-01/volumesrevert/client.go new file mode 100644 index 00000000000..353b7571904 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrevert/client.go @@ -0,0 +1,26 @@ +package volumesrevert + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VolumesRevertClient struct { + Client *resourcemanager.Client +} + +func NewVolumesRevertClientWithBaseURI(sdkApi sdkEnv.Api) (*VolumesRevertClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "volumesrevert", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating VolumesRevertClient: %+v", err) + } + + return &VolumesRevertClient{ + Client: client, + }, nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesrevert/id_volume.go b/resource-manager/netapp/2025-06-01/volumesrevert/id_volume.go new file mode 100644 index 00000000000..e61cfcfca50 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrevert/id_volume.go @@ -0,0 +1,148 @@ +package volumesrevert + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&VolumeId{}) +} + +var _ resourceids.ResourceId = &VolumeId{} + +// VolumeId is a struct representing the Resource ID for a Volume +type VolumeId struct { + SubscriptionId string + ResourceGroupName string + NetAppAccountName string + CapacityPoolName string + VolumeName string +} + +// NewVolumeID returns a new VolumeId struct +func NewVolumeID(subscriptionId string, resourceGroupName string, netAppAccountName string, capacityPoolName string, volumeName string) VolumeId { + return VolumeId{ + SubscriptionId: subscriptionId, + ResourceGroupName: resourceGroupName, + NetAppAccountName: netAppAccountName, + CapacityPoolName: capacityPoolName, + VolumeName: volumeName, + } +} + +// ParseVolumeID parses 'input' into a VolumeId +func ParseVolumeID(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseVolumeIDInsensitively parses 'input' case-insensitively into a VolumeId +// note: this method should only be used for API response data and not user input +func ParseVolumeIDInsensitively(input string) (*VolumeId, error) { + parser := resourceids.NewParserFromResourceIdType(&VolumeId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := VolumeId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *VolumeId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.ResourceGroupName, ok = input.Parsed["resourceGroupName"]; !ok { + 
return resourceids.NewSegmentNotSpecifiedError(id, "resourceGroupName", input) + } + + if id.NetAppAccountName, ok = input.Parsed["netAppAccountName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "netAppAccountName", input) + } + + if id.CapacityPoolName, ok = input.Parsed["capacityPoolName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "capacityPoolName", input) + } + + if id.VolumeName, ok = input.Parsed["volumeName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "volumeName", input) + } + + return nil +} + +// ValidateVolumeID checks that 'input' can be parsed as a Volume ID +func ValidateVolumeID(input interface{}, key string) (warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseVolumeID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Volume ID +func (id VolumeId) ID() string { + fmtString := "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.NetApp/netAppAccounts/%s/capacityPools/%s/volumes/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.ResourceGroupName, id.NetAppAccountName, id.CapacityPoolName, id.VolumeName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Volume ID +func (id VolumeId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticResourceGroups", "resourceGroups", "resourceGroups"), + resourceids.ResourceGroupSegment("resourceGroupName", "example-resource-group"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetApp", "Microsoft.NetApp", "Microsoft.NetApp"), + 
resourceids.StaticSegment("staticNetAppAccounts", "netAppAccounts", "netAppAccounts"), + resourceids.UserSpecifiedSegment("netAppAccountName", "netAppAccountName"), + resourceids.StaticSegment("staticCapacityPools", "capacityPools", "capacityPools"), + resourceids.UserSpecifiedSegment("capacityPoolName", "capacityPoolName"), + resourceids.StaticSegment("staticVolumes", "volumes", "volumes"), + resourceids.UserSpecifiedSegment("volumeName", "volumeName"), + } +} + +// String returns a human-readable description of this Volume ID +func (id VolumeId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Resource Group Name: %q", id.ResourceGroupName), + fmt.Sprintf("Net App Account Name: %q", id.NetAppAccountName), + fmt.Sprintf("Capacity Pool Name: %q", id.CapacityPoolName), + fmt.Sprintf("Volume Name: %q", id.VolumeName), + } + return fmt.Sprintf("Volume (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/netapp/2025-06-01/volumesrevert/id_volume_test.go b/resource-manager/netapp/2025-06-01/volumesrevert/id_volume_test.go new file mode 100644 index 00000000000..f26bf43ecc3 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrevert/id_volume_test.go @@ -0,0 +1,372 @@ +package volumesrevert + +import ( + "testing" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +var _ resourceids.ResourceId = &VolumeId{} + +func TestNewVolumeID(t *testing.T) { + id := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.ResourceGroupName != "example-resource-group" { + t.Fatalf("Expected %q but got %q for Segment 'ResourceGroupName'", id.ResourceGroupName, "example-resource-group") + } + + if id.NetAppAccountName != "netAppAccountName" { + t.Fatalf("Expected %q but got %q for Segment 'NetAppAccountName'", id.NetAppAccountName, "netAppAccountName") + } + + if id.CapacityPoolName != "capacityPoolName" { + t.Fatalf("Expected %q but got %q for Segment 'CapacityPoolName'", id.CapacityPoolName, "capacityPoolName") + } + + if id.VolumeName != "volumeName" { + t.Fatalf("Expected %q but got %q for Segment 'VolumeName'", id.VolumeName, "volumeName") + } +} + +func TestFormatVolumeID(t *testing.T) { + actual := NewVolumeID("12345678-1234-9876-4563-123456789012", "example-resource-group", "netAppAccountName", "capacityPoolName", "volumeName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseVolumeID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: 
"example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestParseVolumeIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *VolumeId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete 
URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS", + Error: true, + 
}, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs", + Error: true, + }, + { + // Valid URI + 
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "example-resource-group", + NetAppAccountName: "netAppAccountName", + CapacityPoolName: "capacityPoolName", + VolumeName: "volumeName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/example-resource-group/providers/Microsoft.NetApp/netAppAccounts/netAppAccountName/capacityPools/capacityPoolName/volumes/volumeName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE", + Expected: &VolumeId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + ResourceGroupName: "eXaMpLe-rEsOuRcE-GrOuP", + NetAppAccountName: "nEtApPaCcOuNtNaMe", + CapacityPoolName: "cApAcItYpOoLnAmE", + VolumeName: "vOlUmEnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/rEsOuRcEgRoUpS/eXaMpLe-rEsOuRcE-GrOuP/pRoViDeRs/mIcRoSoFt.nEtApP/nEtApPaCcOuNtS/nEtApPaCcOuNtNaMe/cApAcItYpOoLs/cApAcItYpOoLnAmE/vOlUmEs/vOlUmEnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseVolumeIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.ResourceGroupName != v.Expected.ResourceGroupName { + t.Fatalf("Expected %q but got %q for ResourceGroupName", v.Expected.ResourceGroupName, actual.ResourceGroupName) + } + + if actual.NetAppAccountName != v.Expected.NetAppAccountName { + t.Fatalf("Expected %q but got %q for NetAppAccountName", v.Expected.NetAppAccountName, actual.NetAppAccountName) + } + + if actual.CapacityPoolName != v.Expected.CapacityPoolName { + t.Fatalf("Expected %q but got %q for CapacityPoolName", v.Expected.CapacityPoolName, actual.CapacityPoolName) + } + + if actual.VolumeName != v.Expected.VolumeName { + t.Fatalf("Expected %q but got %q for VolumeName", v.Expected.VolumeName, actual.VolumeName) + } + + } +} + +func TestSegmentsForVolumeId(t *testing.T) { + segments := VolumeId{}.Segments() + if len(segments) == 0 { + t.Fatalf("VolumeId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/netapp/2025-06-01/volumesrevert/method_volumesrevert.go b/resource-manager/netapp/2025-06-01/volumesrevert/method_volumesrevert.go new file mode 100644 index 00000000000..0be129fdd47 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrevert/method_volumesrevert.go @@ -0,0 +1,74 @@ +package volumesrevert + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type VolumesRevertOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// VolumesRevert ... +func (c VolumesRevertClient) VolumesRevert(ctx context.Context, id VolumeId, input VolumeRevert) (result VolumesRevertOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusOK, + }, + HttpMethod: http.MethodPost, + Path: fmt.Sprintf("%s/revert", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + if err = req.Marshal(input); err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// VolumesRevertThenPoll performs VolumesRevert then polls until it's completed +func (c VolumesRevertClient) VolumesRevertThenPoll(ctx context.Context, id VolumeId, input VolumeRevert) error { + result, err := c.VolumesRevert(ctx, id, input) + if err != nil { + return fmt.Errorf("performing VolumesRevert: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after VolumesRevert: %+v", err) + } + + return nil +} diff --git a/resource-manager/netapp/2025-06-01/volumesrevert/model_volumerevert.go b/resource-manager/netapp/2025-06-01/volumesrevert/model_volumerevert.go new file mode 100644 index 00000000000..c5adf461cf8 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrevert/model_volumerevert.go @@ -0,0 +1,8 @@ +package volumesrevert + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type VolumeRevert struct { + SnapshotId *string `json:"snapshotId,omitempty"` +} diff --git a/resource-manager/netapp/2025-06-01/volumesrevert/version.go b/resource-manager/netapp/2025-06-01/volumesrevert/version.go new file mode 100644 index 00000000000..22df11cd6d0 --- /dev/null +++ b/resource-manager/netapp/2025-06-01/volumesrevert/version.go @@ -0,0 +1,10 @@ +package volumesrevert + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +const defaultApiVersion = "2025-06-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/volumesrevert/2025-06-01" +} diff --git a/resource-manager/network/2024-05-01/networkmanageractiveconfigurations/constants.go b/resource-manager/network/2024-05-01/networkmanageractiveconfigurations/constants.go index c9291fb2fab..793caeabedc 100644 --- a/resource-manager/network/2024-05-01/networkmanageractiveconfigurations/constants.go +++ b/resource-manager/network/2024-05-01/networkmanageractiveconfigurations/constants.go @@ -138,6 +138,8 @@ func parseGroupMemberType(input string) (*GroupMemberType, error) { type ProvisioningState string const ( + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateCreating ProvisioningState = "Creating" ProvisioningStateDeleting ProvisioningState = "Deleting" ProvisioningStateFailed ProvisioningState = "Failed" ProvisioningStateSucceeded ProvisioningState = "Succeeded" @@ -146,6 +148,8 @@ const ( func PossibleValuesForProvisioningState() []string { return []string{ + string(ProvisioningStateCanceled), + string(ProvisioningStateCreating), string(ProvisioningStateDeleting), string(ProvisioningStateFailed), string(ProvisioningStateSucceeded), @@ -168,6 +172,8 @@ func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { func parseProvisioningState(input string) (*ProvisioningState, error) { vals := 
map[string]ProvisioningState{ + "canceled": ProvisioningStateCanceled, + "creating": ProvisioningStateCreating, "deleting": ProvisioningStateDeleting, "failed": ProvisioningStateFailed, "succeeded": ProvisioningStateSucceeded, diff --git a/resource-manager/network/2024-05-01/networkmanageractiveconnectivityconfigurations/constants.go b/resource-manager/network/2024-05-01/networkmanageractiveconnectivityconfigurations/constants.go index 642ecd4a0bd..6f3a6c4f554 100644 --- a/resource-manager/network/2024-05-01/networkmanageractiveconnectivityconfigurations/constants.go +++ b/resource-manager/network/2024-05-01/networkmanageractiveconnectivityconfigurations/constants.go @@ -217,6 +217,8 @@ func parseIsGlobal(input string) (*IsGlobal, error) { type ProvisioningState string const ( + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateCreating ProvisioningState = "Creating" ProvisioningStateDeleting ProvisioningState = "Deleting" ProvisioningStateFailed ProvisioningState = "Failed" ProvisioningStateSucceeded ProvisioningState = "Succeeded" @@ -225,6 +227,8 @@ const ( func PossibleValuesForProvisioningState() []string { return []string{ + string(ProvisioningStateCanceled), + string(ProvisioningStateCreating), string(ProvisioningStateDeleting), string(ProvisioningStateFailed), string(ProvisioningStateSucceeded), @@ -247,6 +251,8 @@ func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { func parseProvisioningState(input string) (*ProvisioningState, error) { vals := map[string]ProvisioningState{ + "canceled": ProvisioningStateCanceled, + "creating": ProvisioningStateCreating, "deleting": ProvisioningStateDeleting, "failed": ProvisioningStateFailed, "succeeded": ProvisioningStateSucceeded, diff --git a/resource-manager/network/2024-05-01/networkmanagereffectiveconnectivityconfiguration/constants.go b/resource-manager/network/2024-05-01/networkmanagereffectiveconnectivityconfiguration/constants.go index caa8b3c8747..9007e4a33df 100644 --- 
a/resource-manager/network/2024-05-01/networkmanagereffectiveconnectivityconfiguration/constants.go +++ b/resource-manager/network/2024-05-01/networkmanagereffectiveconnectivityconfiguration/constants.go @@ -217,6 +217,8 @@ func parseIsGlobal(input string) (*IsGlobal, error) { type ProvisioningState string const ( + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateCreating ProvisioningState = "Creating" ProvisioningStateDeleting ProvisioningState = "Deleting" ProvisioningStateFailed ProvisioningState = "Failed" ProvisioningStateSucceeded ProvisioningState = "Succeeded" @@ -225,6 +227,8 @@ const ( func PossibleValuesForProvisioningState() []string { return []string{ + string(ProvisioningStateCanceled), + string(ProvisioningStateCreating), string(ProvisioningStateDeleting), string(ProvisioningStateFailed), string(ProvisioningStateSucceeded), @@ -247,6 +251,8 @@ func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { func parseProvisioningState(input string) (*ProvisioningState, error) { vals := map[string]ProvisioningState{ + "canceled": ProvisioningStateCanceled, + "creating": ProvisioningStateCreating, "deleting": ProvisioningStateDeleting, "failed": ProvisioningStateFailed, "succeeded": ProvisioningStateSucceeded, diff --git a/resource-manager/network/2024-05-01/networkmanagereffectivesecurityadminrules/constants.go b/resource-manager/network/2024-05-01/networkmanagereffectivesecurityadminrules/constants.go index 67086d778e3..6dbd1e6db74 100644 --- a/resource-manager/network/2024-05-01/networkmanagereffectivesecurityadminrules/constants.go +++ b/resource-manager/network/2024-05-01/networkmanagereffectivesecurityadminrules/constants.go @@ -138,6 +138,8 @@ func parseGroupMemberType(input string) (*GroupMemberType, error) { type ProvisioningState string const ( + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateCreating ProvisioningState = "Creating" ProvisioningStateDeleting ProvisioningState = "Deleting" 
ProvisioningStateFailed ProvisioningState = "Failed" ProvisioningStateSucceeded ProvisioningState = "Succeeded" @@ -146,6 +148,8 @@ const ( func PossibleValuesForProvisioningState() []string { return []string{ + string(ProvisioningStateCanceled), + string(ProvisioningStateCreating), string(ProvisioningStateDeleting), string(ProvisioningStateFailed), string(ProvisioningStateSucceeded), @@ -168,6 +172,8 @@ func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { func parseProvisioningState(input string) (*ProvisioningState, error) { vals := map[string]ProvisioningState{ + "canceled": ProvisioningStateCanceled, + "creating": ProvisioningStateCreating, "deleting": ProvisioningStateDeleting, "failed": ProvisioningStateFailed, "succeeded": ProvisioningStateSucceeded, diff --git a/resource-manager/network/2024-07-01/client.go b/resource-manager/network/2024-07-01/client.go index a9853f5f350..080eb059eeb 100644 --- a/resource-manager/network/2024-07-01/client.go +++ b/resource-manager/network/2024-07-01/client.go @@ -78,6 +78,7 @@ import ( "github.com/hashicorp/go-azure-sdk/resource-manager/network/2024-07-01/networksecurityperimeterloggingconfigurations" "github.com/hashicorp/go-azure-sdk/resource-manager/network/2024-07-01/networksecurityperimeterprofiles" "github.com/hashicorp/go-azure-sdk/resource-manager/network/2024-07-01/networksecurityperimeters" + "github.com/hashicorp/go-azure-sdk/resource-manager/network/2024-07-01/networksecurityperimeterservicetags" "github.com/hashicorp/go-azure-sdk/resource-manager/network/2024-07-01/networkvirtualappliances" "github.com/hashicorp/go-azure-sdk/resource-manager/network/2024-07-01/networkwatchers" "github.com/hashicorp/go-azure-sdk/resource-manager/network/2024-07-01/p2svpngateways" @@ -208,6 +209,7 @@ type Client struct { NetworkSecurityPerimeterLinks *networksecurityperimeterlinks.NetworkSecurityPerimeterLinksClient NetworkSecurityPerimeterLoggingConfigurations 
*networksecurityperimeterloggingconfigurations.NetworkSecurityPerimeterLoggingConfigurationsClient NetworkSecurityPerimeterProfiles *networksecurityperimeterprofiles.NetworkSecurityPerimeterProfilesClient + NetworkSecurityPerimeterServiceTags *networksecurityperimeterservicetags.NetworkSecurityPerimeterServiceTagsClient NetworkSecurityPerimeters *networksecurityperimeters.NetworkSecurityPerimetersClient NetworkVirtualAppliances *networkvirtualappliances.NetworkVirtualAppliancesClient NetworkWatchers *networkwatchers.NetworkWatchersClient @@ -692,6 +694,12 @@ func NewClientWithBaseURI(sdkApi sdkEnv.Api, configureFunc func(c *resourcemanag } configureFunc(networkSecurityPerimeterProfilesClient.Client) + networkSecurityPerimeterServiceTagsClient, err := networksecurityperimeterservicetags.NewNetworkSecurityPerimeterServiceTagsClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building NetworkSecurityPerimeterServiceTags client: %+v", err) + } + configureFunc(networkSecurityPerimeterServiceTagsClient.Client) + networkSecurityPerimetersClient, err := networksecurityperimeters.NewNetworkSecurityPerimetersClientWithBaseURI(sdkApi) if err != nil { return nil, fmt.Errorf("building NetworkSecurityPerimeters client: %+v", err) @@ -1094,6 +1102,7 @@ func NewClientWithBaseURI(sdkApi sdkEnv.Api, configureFunc func(c *resourcemanag NetworkSecurityPerimeterLinks: networkSecurityPerimeterLinksClient, NetworkSecurityPerimeterLoggingConfigurations: networkSecurityPerimeterLoggingConfigurationsClient, NetworkSecurityPerimeterProfiles: networkSecurityPerimeterProfilesClient, + NetworkSecurityPerimeterServiceTags: networkSecurityPerimeterServiceTagsClient, NetworkSecurityPerimeters: networkSecurityPerimetersClient, NetworkVirtualAppliances: networkVirtualAppliancesClient, NetworkWatchers: networkWatchersClient, diff --git a/resource-manager/network/2024-07-01/networkmanageractiveconfigurations/constants.go 
b/resource-manager/network/2024-07-01/networkmanageractiveconfigurations/constants.go index c9291fb2fab..793caeabedc 100644 --- a/resource-manager/network/2024-07-01/networkmanageractiveconfigurations/constants.go +++ b/resource-manager/network/2024-07-01/networkmanageractiveconfigurations/constants.go @@ -138,6 +138,8 @@ func parseGroupMemberType(input string) (*GroupMemberType, error) { type ProvisioningState string const ( + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateCreating ProvisioningState = "Creating" ProvisioningStateDeleting ProvisioningState = "Deleting" ProvisioningStateFailed ProvisioningState = "Failed" ProvisioningStateSucceeded ProvisioningState = "Succeeded" @@ -146,6 +148,8 @@ const ( func PossibleValuesForProvisioningState() []string { return []string{ + string(ProvisioningStateCanceled), + string(ProvisioningStateCreating), string(ProvisioningStateDeleting), string(ProvisioningStateFailed), string(ProvisioningStateSucceeded), @@ -168,6 +172,8 @@ func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { func parseProvisioningState(input string) (*ProvisioningState, error) { vals := map[string]ProvisioningState{ + "canceled": ProvisioningStateCanceled, + "creating": ProvisioningStateCreating, "deleting": ProvisioningStateDeleting, "failed": ProvisioningStateFailed, "succeeded": ProvisioningStateSucceeded, diff --git a/resource-manager/network/2024-07-01/networkmanageractiveconnectivityconfigurations/constants.go b/resource-manager/network/2024-07-01/networkmanageractiveconnectivityconfigurations/constants.go index 8195863c49a..451770f8cbb 100644 --- a/resource-manager/network/2024-07-01/networkmanageractiveconnectivityconfigurations/constants.go +++ b/resource-manager/network/2024-07-01/networkmanageractiveconnectivityconfigurations/constants.go @@ -340,6 +340,8 @@ func parsePeeringEnforcement(input string) (*PeeringEnforcement, error) { type ProvisioningState string const ( + ProvisioningStateCanceled 
ProvisioningState = "Canceled" + ProvisioningStateCreating ProvisioningState = "Creating" ProvisioningStateDeleting ProvisioningState = "Deleting" ProvisioningStateFailed ProvisioningState = "Failed" ProvisioningStateSucceeded ProvisioningState = "Succeeded" @@ -348,6 +350,8 @@ const ( func PossibleValuesForProvisioningState() []string { return []string{ + string(ProvisioningStateCanceled), + string(ProvisioningStateCreating), string(ProvisioningStateDeleting), string(ProvisioningStateFailed), string(ProvisioningStateSucceeded), @@ -370,6 +374,8 @@ func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { func parseProvisioningState(input string) (*ProvisioningState, error) { vals := map[string]ProvisioningState{ + "canceled": ProvisioningStateCanceled, + "creating": ProvisioningStateCreating, "deleting": ProvisioningStateDeleting, "failed": ProvisioningStateFailed, "succeeded": ProvisioningStateSucceeded, diff --git a/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/README.md b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/README.md new file mode 100644 index 00000000000..ec4c090eb0f --- /dev/null +++ b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/README.md @@ -0,0 +1,37 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/network/2024-07-01/networksecurityperimeterservicetags` Documentation + +The `networksecurityperimeterservicetags` SDK allows for interaction with Azure Resource Manager `network` (API Version `2024-07-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-sdk/resource-manager/network/2024-07-01/networksecurityperimeterservicetags" +``` + + +### Client Initialization + +```go +client := networksecurityperimeterservicetags.NewNetworkSecurityPerimeterServiceTagsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `NetworkSecurityPerimeterServiceTagsClient.List` + +```go +ctx := context.TODO() +id := networksecurityperimeterservicetags.NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + +// alternatively `client.List(ctx, id)` can be used to do batched pagination +items, err := client.ListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` diff --git a/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/client.go b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/client.go new file mode 100644 index 00000000000..d2f81dcc3ca --- /dev/null +++ b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/client.go @@ -0,0 +1,26 @@ +package networksecurityperimeterservicetags + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type NetworkSecurityPerimeterServiceTagsClient struct { + Client *resourcemanager.Client +} + +func NewNetworkSecurityPerimeterServiceTagsClientWithBaseURI(sdkApi sdkEnv.Api) (*NetworkSecurityPerimeterServiceTagsClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "networksecurityperimeterservicetags", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating NetworkSecurityPerimeterServiceTagsClient: %+v", err) + } + + return &NetworkSecurityPerimeterServiceTagsClient{ + Client: client, + }, nil +} diff --git a/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/id_location.go b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/id_location.go new file mode 100644 index 00000000000..8308a00c22d --- /dev/null +++ b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/id_location.go @@ -0,0 +1,121 @@ +package networksecurityperimeterservicetags + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-azure-helpers/resourcemanager/recaser" + "github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +func init() { + recaser.RegisterResourceId(&LocationId{}) +} + +var _ resourceids.ResourceId = &LocationId{} + +// LocationId is a struct representing the Resource ID for a Location +type LocationId struct { + SubscriptionId string + LocationName string +} + +// NewLocationID returns a new LocationId struct +func NewLocationID(subscriptionId string, locationName string) LocationId { + return LocationId{ + SubscriptionId: subscriptionId, + LocationName: locationName, + } +} + +// ParseLocationID parses 'input' into a LocationId +func ParseLocationID(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, false) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +// ParseLocationIDInsensitively parses 'input' case-insensitively into a LocationId +// note: this method should only be used for API response data and not user input +func ParseLocationIDInsensitively(input string) (*LocationId, error) { + parser := resourceids.NewParserFromResourceIdType(&LocationId{}) + parsed, err := parser.Parse(input, true) + if err != nil { + return nil, fmt.Errorf("parsing %q: %+v", input, err) + } + + id := LocationId{} + if err = id.FromParseResult(*parsed); err != nil { + return nil, err + } + + return &id, nil +} + +func (id *LocationId) FromParseResult(input resourceids.ParseResult) error { + var ok bool + + if id.SubscriptionId, ok = input.Parsed["subscriptionId"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "subscriptionId", input) + } + + if id.LocationName, ok = input.Parsed["locationName"]; !ok { + return resourceids.NewSegmentNotSpecifiedError(id, "locationName", input) + } + + return nil +} + +// ValidateLocationID checks that 'input' can be parsed as a Location ID +func ValidateLocationID(input interface{}, key string) 
(warnings []string, errors []error) { + v, ok := input.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected %q to be a string", key)) + return + } + + if _, err := ParseLocationID(v); err != nil { + errors = append(errors, err) + } + + return +} + +// ID returns the formatted Location ID +func (id LocationId) ID() string { + fmtString := "/subscriptions/%s/providers/Microsoft.Network/locations/%s" + return fmt.Sprintf(fmtString, id.SubscriptionId, id.LocationName) +} + +// Segments returns a slice of Resource ID Segments which comprise this Location ID +func (id LocationId) Segments() []resourceids.Segment { + return []resourceids.Segment{ + resourceids.StaticSegment("staticSubscriptions", "subscriptions", "subscriptions"), + resourceids.SubscriptionIdSegment("subscriptionId", "12345678-1234-9876-4563-123456789012"), + resourceids.StaticSegment("staticProviders", "providers", "providers"), + resourceids.ResourceProviderSegment("staticMicrosoftNetwork", "Microsoft.Network", "Microsoft.Network"), + resourceids.StaticSegment("staticLocations", "locations", "locations"), + resourceids.UserSpecifiedSegment("locationName", "locationName"), + } +} + +// String returns a human-readable description of this Location ID +func (id LocationId) String() string { + components := []string{ + fmt.Sprintf("Subscription: %q", id.SubscriptionId), + fmt.Sprintf("Location Name: %q", id.LocationName), + } + return fmt.Sprintf("Location (%s)", strings.Join(components, "\n")) +} diff --git a/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/id_location_test.go b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/id_location_test.go new file mode 100644 index 00000000000..b957a9dc60e --- /dev/null +++ b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/id_location_test.go @@ -0,0 +1,237 @@ +package networksecurityperimeterservicetags + +import ( + "testing" + + 
"github.com/hashicorp/go-azure-helpers/resourcemanager/resourceids" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +var _ resourceids.ResourceId = &LocationId{} + +func TestNewLocationID(t *testing.T) { + id := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName") + + if id.SubscriptionId != "12345678-1234-9876-4563-123456789012" { + t.Fatalf("Expected %q but got %q for Segment 'SubscriptionId'", id.SubscriptionId, "12345678-1234-9876-4563-123456789012") + } + + if id.LocationName != "locationName" { + t.Fatalf("Expected %q but got %q for Segment 'LocationName'", id.LocationName, "locationName") + } +} + +func TestFormatLocationID(t *testing.T) { + actual := NewLocationID("12345678-1234-9876-4563-123456789012", "locationName").ID() + expected := "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Network/locations/locationName" + if actual != expected { + t.Fatalf("Expected the Formatted ID to be %q but got %q", expected, actual) + } +} + +func TestParseLocationID(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Network", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Network/locations", + Error: true, + }, + { + // Valid URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Network/locations/locationName", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Network/locations/locationName/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationID(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestParseLocationIDInsensitively(t *testing.T) { + testData := []struct { + Input string + Error bool + Expected *LocationId + }{ + { + // Incomplete URI + Input: "", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs", + Error: true, + }, + { + // Incomplete URI + Input: 
"/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Network", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtWoRk", + Error: true, + }, + { + // Incomplete URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Network/locations", + Error: true, + }, + { + // Incomplete URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtWoRk/lOcAtIoNs", + Error: true, + }, + { + // Valid URI + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Network/locations/locationName", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "locationName", + }, + }, + { + // Invalid (Valid Uri with Extra segment) + Input: "/subscriptions/12345678-1234-9876-4563-123456789012/providers/Microsoft.Network/locations/locationName/extra", + Error: true, + }, + { + // Valid URI (mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtWoRk/lOcAtIoNs/lOcAtIoNnAmE", + Expected: &LocationId{ + SubscriptionId: "12345678-1234-9876-4563-123456789012", + LocationName: "lOcAtIoNnAmE", + }, + }, + { + // Invalid (Valid Uri with Extra segment - mIxEd CaSe since this is insensitive) + Input: "/sUbScRiPtIoNs/12345678-1234-9876-4563-123456789012/pRoViDeRs/mIcRoSoFt.nEtWoRk/lOcAtIoNs/lOcAtIoNnAmE/extra", + Error: true, + }, + } + for _, v := range testData { + t.Logf("[DEBUG] Testing %q", v.Input) + + actual, err := ParseLocationIDInsensitively(v.Input) + if err != nil { + if v.Error { + continue + } + + t.Fatalf("Expect a value but got an error: %+v", err) + } + if v.Error { + t.Fatal("Expect an error but didn't get one") + } + + if actual.SubscriptionId != v.Expected.SubscriptionId { + t.Fatalf("Expected %q but got %q for 
SubscriptionId", v.Expected.SubscriptionId, actual.SubscriptionId) + } + + if actual.LocationName != v.Expected.LocationName { + t.Fatalf("Expected %q but got %q for LocationName", v.Expected.LocationName, actual.LocationName) + } + + } +} + +func TestSegmentsForLocationId(t *testing.T) { + segments := LocationId{}.Segments() + if len(segments) == 0 { + t.Fatalf("LocationId has no segments") + } + + uniqueNames := make(map[string]struct{}, 0) + for _, segment := range segments { + uniqueNames[segment.Name] = struct{}{} + } + if len(uniqueNames) != len(segments) { + t.Fatalf("Expected the Segments to be unique but got %q unique segments and %d total segments", len(uniqueNames), len(segments)) + } +} diff --git a/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/method_list.go b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/method_list.go new file mode 100644 index 00000000000..d161d6b7d6d --- /dev/null +++ b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/method_list.go @@ -0,0 +1,105 @@ +package networksecurityperimeterservicetags + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type ListOperationResponse struct { + HttpResponse *http.Response + OData *odata.OData + Model *[]NspServiceTagsResource +} + +type ListCompleteResult struct { + LatestHttpResponse *http.Response + Items []NspServiceTagsResource +} + +type ListCustomPager struct { + NextLink *odata.Link `json:"nextLink"` +} + +func (p *ListCustomPager) NextPageLink() *odata.Link { + defer func() { + p.NextLink = nil + }() + + return p.NextLink +} + +// List ... 
+func (c NetworkSecurityPerimeterServiceTagsClient) List(ctx context.Context, id LocationId) (result ListOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusOK, + }, + HttpMethod: http.MethodGet, + Pager: &ListCustomPager{}, + Path: fmt.Sprintf("%s/nspServiceTags", id.ID()), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.ExecutePaged(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + var values struct { + Values *[]NspServiceTagsResource `json:"value"` + } + if err = resp.Unmarshal(&values); err != nil { + return + } + + result.Model = values.Values + + return +} + +// ListComplete retrieves all the results into a single object +func (c NetworkSecurityPerimeterServiceTagsClient) ListComplete(ctx context.Context, id LocationId) (ListCompleteResult, error) { + return c.ListCompleteMatchingPredicate(ctx, id, NspServiceTagsResourceOperationPredicate{}) +} + +// ListCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c NetworkSecurityPerimeterServiceTagsClient) ListCompleteMatchingPredicate(ctx context.Context, id LocationId, predicate NspServiceTagsResourceOperationPredicate) (result ListCompleteResult, err error) { + items := make([]NspServiceTagsResource, 0) + + resp, err := c.List(ctx, id) + if err != nil { + result.LatestHttpResponse = resp.HttpResponse + err = fmt.Errorf("loading results: %+v", err) + return + } + if resp.Model != nil { + for _, v := range *resp.Model { + if predicate.Matches(v) { + items = append(items, v) + } + } + } + + result = ListCompleteResult{ + LatestHttpResponse: resp.HttpResponse, + Items: items, + } + return +} diff --git a/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/model_nspservicetagsresource.go 
b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/model_nspservicetagsresource.go new file mode 100644 index 00000000000..ce1f4aad125 --- /dev/null +++ b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/model_nspservicetagsresource.go @@ -0,0 +1,8 @@ +package networksecurityperimeterservicetags + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type NspServiceTagsResource struct { + ServiceTags *[]string `json:"serviceTags,omitempty"` +} diff --git a/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/predicates.go b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/predicates.go new file mode 100644 index 00000000000..4d22dddee1b --- /dev/null +++ b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/predicates.go @@ -0,0 +1,12 @@ +package networksecurityperimeterservicetags + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type NspServiceTagsResourceOperationPredicate struct { +} + +func (p NspServiceTagsResourceOperationPredicate) Matches(input NspServiceTagsResource) bool { + + return true +} diff --git a/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/version.go b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/version.go new file mode 100644 index 00000000000..2687170c4bc --- /dev/null +++ b/resource-manager/network/2024-07-01/networksecurityperimeterservicetags/version.go @@ -0,0 +1,10 @@ +package networksecurityperimeterservicetags + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2024-07-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/networksecurityperimeterservicetags/2024-07-01" +} diff --git a/resource-manager/recoveryservices/2025-02-01/client.go b/resource-manager/recoveryservices/2025-02-01/client.go index 8dc270a41b4..5b2f3351ca9 100644 --- a/resource-manager/recoveryservices/2025-02-01/client.go +++ b/resource-manager/recoveryservices/2025-02-01/client.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/go-azure-sdk/resource-manager/recoveryservices/2025-02-01/openapis" "github.com/hashicorp/go-azure-sdk/resource-manager/recoveryservices/2025-02-01/privatelinkresourceoperationgroup" "github.com/hashicorp/go-azure-sdk/resource-manager/recoveryservices/2025-02-01/vaultextendedinforesources" + "github.com/hashicorp/go-azure-sdk/resource-manager/recoveryservices/2025-02-01/vaults" "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" ) @@ -19,6 +20,7 @@ type Client struct { Openapis *openapis.OpenapisClient PrivateLinkResourceOperationGroup *privatelinkresourceoperationgroup.PrivateLinkResourceOperationGroupClient VaultExtendedInfoResources *vaultextendedinforesources.VaultExtendedInfoResourcesClient + Vaults *vaults.VaultsClient } func NewClientWithBaseURI(sdkApi sdkEnv.Api, configureFunc func(c *resourcemanager.Client)) (*Client, error) { @@ -46,10 +48,17 @@ func NewClientWithBaseURI(sdkApi sdkEnv.Api, configureFunc func(c *resourcemanag } configureFunc(vaultExtendedInfoResourcesClient.Client) + vaultsClient, err := vaults.NewVaultsClientWithBaseURI(sdkApi) + if err != nil { + return nil, fmt.Errorf("building Vaults client: %+v", err) + } + configureFunc(vaultsClient.Client) + return &Client{ GetOperationResult: getOperationResultClient, Openapis: openapisClient, PrivateLinkResourceOperationGroup: privateLinkResourceOperationGroupClient, VaultExtendedInfoResources: vaultExtendedInfoResourcesClient, + Vaults: 
vaultsClient, }, nil } diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/README.md b/resource-manager/recoveryservices/2025-02-01/openapis/README.md index 9bc85ea7776..3c6965db5c4 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/README.md +++ b/resource-manager/recoveryservices/2025-02-01/openapis/README.md @@ -8,7 +8,6 @@ This readme covers example usages, but further information on [using this SDK ca ### Import Path ```go -import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" import "github.com/hashicorp/go-azure-sdk/resource-manager/recoveryservices/2025-02-01/openapis" ``` @@ -61,153 +60,3 @@ if model := read.Model; model != nil { // do something with the model/response object } ``` - - -### Example Usage: `OpenapisClient.RegisteredIdentitiesDelete` - -```go -ctx := context.TODO() -id := openapis.NewRegisteredIdentityID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName", "registeredIdentityName") - -read, err := client.RegisteredIdentitiesDelete(ctx, id) -if err != nil { - // handle the error -} -if model := read.Model; model != nil { - // do something with the model/response object -} -``` - - -### Example Usage: `OpenapisClient.ReplicationUsagesList` - -```go -ctx := context.TODO() -id := openapis.NewVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName") - -// alternatively `client.ReplicationUsagesList(ctx, id)` can be used to do batched pagination -items, err := client.ReplicationUsagesListComplete(ctx, id) -if err != nil { - // handle the error -} -for _, item := range items { - // do something -} -``` - - -### Example Usage: `OpenapisClient.UsagesListByVaults` - -```go -ctx := context.TODO() -id := openapis.NewVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName") - -// alternatively `client.UsagesListByVaults(ctx, id)` can be used to do batched pagination -items, err := client.UsagesListByVaultsComplete(ctx, id) 
-if err != nil { - // handle the error -} -for _, item := range items { - // do something -} -``` - - -### Example Usage: `OpenapisClient.VaultCertificatesCreate` - -```go -ctx := context.TODO() -id := openapis.NewCertificateID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName", "certificateName") - -payload := openapis.CertificateRequest{ - // ... -} - - -read, err := client.VaultCertificatesCreate(ctx, id, payload) -if err != nil { - // handle the error -} -if model := read.Model; model != nil { - // do something with the model/response object -} -``` - - -### Example Usage: `OpenapisClient.VaultsCreateOrUpdate` - -```go -ctx := context.TODO() -id := openapis.NewVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName") - -payload := openapis.Vault{ - // ... -} - - -if err := client.VaultsCreateOrUpdateThenPoll(ctx, id, payload, openapis.DefaultVaultsCreateOrUpdateOperationOptions()); err != nil { - // handle the error -} -``` - - -### Example Usage: `OpenapisClient.VaultsDelete` - -```go -ctx := context.TODO() -id := openapis.NewVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName") - -if err := client.VaultsDeleteThenPoll(ctx, id); err != nil { - // handle the error -} -``` - - -### Example Usage: `OpenapisClient.VaultsGet` - -```go -ctx := context.TODO() -id := openapis.NewVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName") - -read, err := client.VaultsGet(ctx, id) -if err != nil { - // handle the error -} -if model := read.Model; model != nil { - // do something with the model/response object -} -``` - - -### Example Usage: `OpenapisClient.VaultsListBySubscriptionId` - -```go -ctx := context.TODO() -id := commonids.NewSubscriptionID("12345678-1234-9876-4563-123456789012") - -// alternatively `client.VaultsListBySubscriptionId(ctx, id)` can be used to do batched pagination -items, err := client.VaultsListBySubscriptionIdComplete(ctx, id) 
-if err != nil { - // handle the error -} -for _, item := range items { - // do something -} -``` - - -### Example Usage: `OpenapisClient.VaultsUpdate` - -```go -ctx := context.TODO() -id := openapis.NewVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName") - -payload := openapis.PatchVault{ - // ... -} - - -if err := client.VaultsUpdateThenPoll(ctx, id, payload, openapis.DefaultVaultsUpdateOperationOptions()); err != nil { - // handle the error -} -``` diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/constants.go b/resource-manager/recoveryservices/2025-02-01/openapis/constants.go index c8ecde98cb4..27045c5beb9 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/constants.go +++ b/resource-manager/recoveryservices/2025-02-01/openapis/constants.go @@ -9,1051 +9,6 @@ import ( // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
-type AlertsState string - -const ( - AlertsStateDisabled AlertsState = "Disabled" - AlertsStateEnabled AlertsState = "Enabled" -) - -func PossibleValuesForAlertsState() []string { - return []string{ - string(AlertsStateDisabled), - string(AlertsStateEnabled), - } -} - -func (s *AlertsState) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseAlertsState(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseAlertsState(input string) (*AlertsState, error) { - vals := map[string]AlertsState{ - "disabled": AlertsStateDisabled, - "enabled": AlertsStateEnabled, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := AlertsState(input) - return &out, nil -} - -type AuthType string - -const ( - AuthTypeAAD AuthType = "AAD" - AuthTypeACS AuthType = "ACS" - AuthTypeAccessControlService AuthType = "AccessControlService" - AuthTypeAzureActiveDirectory AuthType = "AzureActiveDirectory" - AuthTypeInvalid AuthType = "Invalid" -) - -func PossibleValuesForAuthType() []string { - return []string{ - string(AuthTypeAAD), - string(AuthTypeACS), - string(AuthTypeAccessControlService), - string(AuthTypeAzureActiveDirectory), - string(AuthTypeInvalid), - } -} - -func (s *AuthType) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseAuthType(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseAuthType(input string) (*AuthType, error) { - vals := map[string]AuthType{ - "aad": AuthTypeAAD, - "acs": AuthTypeACS, - "accesscontrolservice": AuthTypeAccessControlService, - "azureactivedirectory": 
AuthTypeAzureActiveDirectory, - "invalid": AuthTypeInvalid, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := AuthType(input) - return &out, nil -} - -type BCDRSecurityLevel string - -const ( - BCDRSecurityLevelExcellent BCDRSecurityLevel = "Excellent" - BCDRSecurityLevelFair BCDRSecurityLevel = "Fair" - BCDRSecurityLevelGood BCDRSecurityLevel = "Good" - BCDRSecurityLevelPoor BCDRSecurityLevel = "Poor" -) - -func PossibleValuesForBCDRSecurityLevel() []string { - return []string{ - string(BCDRSecurityLevelExcellent), - string(BCDRSecurityLevelFair), - string(BCDRSecurityLevelGood), - string(BCDRSecurityLevelPoor), - } -} - -func (s *BCDRSecurityLevel) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseBCDRSecurityLevel(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseBCDRSecurityLevel(input string) (*BCDRSecurityLevel, error) { - vals := map[string]BCDRSecurityLevel{ - "excellent": BCDRSecurityLevelExcellent, - "fair": BCDRSecurityLevelFair, - "good": BCDRSecurityLevelGood, - "poor": BCDRSecurityLevelPoor, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := BCDRSecurityLevel(input) - return &out, nil -} - -type BackupStorageVersion string - -const ( - BackupStorageVersionUnassigned BackupStorageVersion = "Unassigned" - BackupStorageVersionVOne BackupStorageVersion = "V1" - BackupStorageVersionVTwo BackupStorageVersion = "V2" -) - -func PossibleValuesForBackupStorageVersion() []string { - return []string{ - string(BackupStorageVersionUnassigned), - string(BackupStorageVersionVOne), - string(BackupStorageVersionVTwo), - } -} - -func (s *BackupStorageVersion) 
UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseBackupStorageVersion(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseBackupStorageVersion(input string) (*BackupStorageVersion, error) { - vals := map[string]BackupStorageVersion{ - "unassigned": BackupStorageVersionUnassigned, - "v1": BackupStorageVersionVOne, - "v2": BackupStorageVersionVTwo, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := BackupStorageVersion(input) - return &out, nil -} - -type CrossRegionRestore string - -const ( - CrossRegionRestoreDisabled CrossRegionRestore = "Disabled" - CrossRegionRestoreEnabled CrossRegionRestore = "Enabled" -) - -func PossibleValuesForCrossRegionRestore() []string { - return []string{ - string(CrossRegionRestoreDisabled), - string(CrossRegionRestoreEnabled), - } -} - -func (s *CrossRegionRestore) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseCrossRegionRestore(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseCrossRegionRestore(input string) (*CrossRegionRestore, error) { - vals := map[string]CrossRegionRestore{ - "disabled": CrossRegionRestoreDisabled, - "enabled": CrossRegionRestoreEnabled, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := CrossRegionRestore(input) - return &out, nil -} - -type CrossSubscriptionRestoreState string - -const ( - CrossSubscriptionRestoreStateDisabled CrossSubscriptionRestoreState = "Disabled" - 
CrossSubscriptionRestoreStateEnabled CrossSubscriptionRestoreState = "Enabled" - CrossSubscriptionRestoreStatePermanentlyDisabled CrossSubscriptionRestoreState = "PermanentlyDisabled" -) - -func PossibleValuesForCrossSubscriptionRestoreState() []string { - return []string{ - string(CrossSubscriptionRestoreStateDisabled), - string(CrossSubscriptionRestoreStateEnabled), - string(CrossSubscriptionRestoreStatePermanentlyDisabled), - } -} - -func (s *CrossSubscriptionRestoreState) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseCrossSubscriptionRestoreState(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseCrossSubscriptionRestoreState(input string) (*CrossSubscriptionRestoreState, error) { - vals := map[string]CrossSubscriptionRestoreState{ - "disabled": CrossSubscriptionRestoreStateDisabled, - "enabled": CrossSubscriptionRestoreStateEnabled, - "permanentlydisabled": CrossSubscriptionRestoreStatePermanentlyDisabled, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := CrossSubscriptionRestoreState(input) - return &out, nil -} - -type EnhancedSecurityState string - -const ( - EnhancedSecurityStateAlwaysON EnhancedSecurityState = "AlwaysON" - EnhancedSecurityStateDisabled EnhancedSecurityState = "Disabled" - EnhancedSecurityStateEnabled EnhancedSecurityState = "Enabled" - EnhancedSecurityStateInvalid EnhancedSecurityState = "Invalid" -) - -func PossibleValuesForEnhancedSecurityState() []string { - return []string{ - string(EnhancedSecurityStateAlwaysON), - string(EnhancedSecurityStateDisabled), - string(EnhancedSecurityStateEnabled), - string(EnhancedSecurityStateInvalid), - } -} - -func (s *EnhancedSecurityState) UnmarshalJSON(bytes []byte) error { - var decoded 
string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseEnhancedSecurityState(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseEnhancedSecurityState(input string) (*EnhancedSecurityState, error) { - vals := map[string]EnhancedSecurityState{ - "alwayson": EnhancedSecurityStateAlwaysON, - "disabled": EnhancedSecurityStateDisabled, - "enabled": EnhancedSecurityStateEnabled, - "invalid": EnhancedSecurityStateInvalid, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := EnhancedSecurityState(input) - return &out, nil -} - -type IdentityType string - -const ( - IdentityTypeSystemAssigned IdentityType = "SystemAssigned" - IdentityTypeUserAssigned IdentityType = "UserAssigned" -) - -func PossibleValuesForIdentityType() []string { - return []string{ - string(IdentityTypeSystemAssigned), - string(IdentityTypeUserAssigned), - } -} - -func (s *IdentityType) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseIdentityType(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseIdentityType(input string) (*IdentityType, error) { - vals := map[string]IdentityType{ - "systemassigned": IdentityTypeSystemAssigned, - "userassigned": IdentityTypeUserAssigned, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := IdentityType(input) - return &out, nil -} - -type ImmutabilityState string - -const ( - ImmutabilityStateDisabled ImmutabilityState = "Disabled" - ImmutabilityStateLocked ImmutabilityState = "Locked" - ImmutabilityStateUnlocked 
ImmutabilityState = "Unlocked" -) - -func PossibleValuesForImmutabilityState() []string { - return []string{ - string(ImmutabilityStateDisabled), - string(ImmutabilityStateLocked), - string(ImmutabilityStateUnlocked), - } -} - -func (s *ImmutabilityState) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseImmutabilityState(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseImmutabilityState(input string) (*ImmutabilityState, error) { - vals := map[string]ImmutabilityState{ - "disabled": ImmutabilityStateDisabled, - "locked": ImmutabilityStateLocked, - "unlocked": ImmutabilityStateUnlocked, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := ImmutabilityState(input) - return &out, nil -} - -type InfrastructureEncryptionState string - -const ( - InfrastructureEncryptionStateDisabled InfrastructureEncryptionState = "Disabled" - InfrastructureEncryptionStateEnabled InfrastructureEncryptionState = "Enabled" -) - -func PossibleValuesForInfrastructureEncryptionState() []string { - return []string{ - string(InfrastructureEncryptionStateDisabled), - string(InfrastructureEncryptionStateEnabled), - } -} - -func (s *InfrastructureEncryptionState) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseInfrastructureEncryptionState(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseInfrastructureEncryptionState(input string) (*InfrastructureEncryptionState, error) { - vals := map[string]InfrastructureEncryptionState{ - "disabled": InfrastructureEncryptionStateDisabled, - 
"enabled": InfrastructureEncryptionStateEnabled, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := InfrastructureEncryptionState(input) - return &out, nil -} - -type MultiUserAuthorization string - -const ( - MultiUserAuthorizationDisabled MultiUserAuthorization = "Disabled" - MultiUserAuthorizationEnabled MultiUserAuthorization = "Enabled" - MultiUserAuthorizationInvalid MultiUserAuthorization = "Invalid" -) - -func PossibleValuesForMultiUserAuthorization() []string { - return []string{ - string(MultiUserAuthorizationDisabled), - string(MultiUserAuthorizationEnabled), - string(MultiUserAuthorizationInvalid), - } -} - -func (s *MultiUserAuthorization) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseMultiUserAuthorization(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseMultiUserAuthorization(input string) (*MultiUserAuthorization, error) { - vals := map[string]MultiUserAuthorization{ - "disabled": MultiUserAuthorizationDisabled, - "enabled": MultiUserAuthorizationEnabled, - "invalid": MultiUserAuthorizationInvalid, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := MultiUserAuthorization(input) - return &out, nil -} - -type PrivateEndpointConnectionStatus string - -const ( - PrivateEndpointConnectionStatusApproved PrivateEndpointConnectionStatus = "Approved" - PrivateEndpointConnectionStatusDisconnected PrivateEndpointConnectionStatus = "Disconnected" - PrivateEndpointConnectionStatusPending PrivateEndpointConnectionStatus = "Pending" - PrivateEndpointConnectionStatusRejected PrivateEndpointConnectionStatus = "Rejected" -) - -func 
PossibleValuesForPrivateEndpointConnectionStatus() []string { - return []string{ - string(PrivateEndpointConnectionStatusApproved), - string(PrivateEndpointConnectionStatusDisconnected), - string(PrivateEndpointConnectionStatusPending), - string(PrivateEndpointConnectionStatusRejected), - } -} - -func (s *PrivateEndpointConnectionStatus) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parsePrivateEndpointConnectionStatus(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parsePrivateEndpointConnectionStatus(input string) (*PrivateEndpointConnectionStatus, error) { - vals := map[string]PrivateEndpointConnectionStatus{ - "approved": PrivateEndpointConnectionStatusApproved, - "disconnected": PrivateEndpointConnectionStatusDisconnected, - "pending": PrivateEndpointConnectionStatusPending, - "rejected": PrivateEndpointConnectionStatusRejected, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := PrivateEndpointConnectionStatus(input) - return &out, nil -} - -type ProvisioningState string - -const ( - ProvisioningStateDeleting ProvisioningState = "Deleting" - ProvisioningStateFailed ProvisioningState = "Failed" - ProvisioningStatePending ProvisioningState = "Pending" - ProvisioningStateSucceeded ProvisioningState = "Succeeded" -) - -func PossibleValuesForProvisioningState() []string { - return []string{ - string(ProvisioningStateDeleting), - string(ProvisioningStateFailed), - string(ProvisioningStatePending), - string(ProvisioningStateSucceeded), - } -} - -func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := 
parseProvisioningState(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseProvisioningState(input string) (*ProvisioningState, error) { - vals := map[string]ProvisioningState{ - "deleting": ProvisioningStateDeleting, - "failed": ProvisioningStateFailed, - "pending": ProvisioningStatePending, - "succeeded": ProvisioningStateSucceeded, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := ProvisioningState(input) - return &out, nil -} - -type PublicNetworkAccess string - -const ( - PublicNetworkAccessDisabled PublicNetworkAccess = "Disabled" - PublicNetworkAccessEnabled PublicNetworkAccess = "Enabled" -) - -func PossibleValuesForPublicNetworkAccess() []string { - return []string{ - string(PublicNetworkAccessDisabled), - string(PublicNetworkAccessEnabled), - } -} - -func (s *PublicNetworkAccess) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parsePublicNetworkAccess(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parsePublicNetworkAccess(input string) (*PublicNetworkAccess, error) { - vals := map[string]PublicNetworkAccess{ - "disabled": PublicNetworkAccessDisabled, - "enabled": PublicNetworkAccessEnabled, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := PublicNetworkAccess(input) - return &out, nil -} - -type ResourceMoveState string - -const ( - ResourceMoveStateCommitFailed ResourceMoveState = "CommitFailed" - ResourceMoveStateCommitTimedout ResourceMoveState = "CommitTimedout" - ResourceMoveStateCriticalFailure ResourceMoveState = "CriticalFailure" - ResourceMoveStateFailure ResourceMoveState = 
"Failure" - ResourceMoveStateInProgress ResourceMoveState = "InProgress" - ResourceMoveStateMoveSucceeded ResourceMoveState = "MoveSucceeded" - ResourceMoveStatePartialSuccess ResourceMoveState = "PartialSuccess" - ResourceMoveStatePrepareFailed ResourceMoveState = "PrepareFailed" - ResourceMoveStatePrepareTimedout ResourceMoveState = "PrepareTimedout" - ResourceMoveStateUnknown ResourceMoveState = "Unknown" -) - -func PossibleValuesForResourceMoveState() []string { - return []string{ - string(ResourceMoveStateCommitFailed), - string(ResourceMoveStateCommitTimedout), - string(ResourceMoveStateCriticalFailure), - string(ResourceMoveStateFailure), - string(ResourceMoveStateInProgress), - string(ResourceMoveStateMoveSucceeded), - string(ResourceMoveStatePartialSuccess), - string(ResourceMoveStatePrepareFailed), - string(ResourceMoveStatePrepareTimedout), - string(ResourceMoveStateUnknown), - } -} - -func (s *ResourceMoveState) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseResourceMoveState(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseResourceMoveState(input string) (*ResourceMoveState, error) { - vals := map[string]ResourceMoveState{ - "commitfailed": ResourceMoveStateCommitFailed, - "committimedout": ResourceMoveStateCommitTimedout, - "criticalfailure": ResourceMoveStateCriticalFailure, - "failure": ResourceMoveStateFailure, - "inprogress": ResourceMoveStateInProgress, - "movesucceeded": ResourceMoveStateMoveSucceeded, - "partialsuccess": ResourceMoveStatePartialSuccess, - "preparefailed": ResourceMoveStatePrepareFailed, - "preparetimedout": ResourceMoveStatePrepareTimedout, - "unknown": ResourceMoveStateUnknown, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it 
- out := ResourceMoveState(input) - return &out, nil -} - -type SecureScoreLevel string - -const ( - SecureScoreLevelAdequate SecureScoreLevel = "Adequate" - SecureScoreLevelMaximum SecureScoreLevel = "Maximum" - SecureScoreLevelMinimum SecureScoreLevel = "Minimum" - SecureScoreLevelNone SecureScoreLevel = "None" -) - -func PossibleValuesForSecureScoreLevel() []string { - return []string{ - string(SecureScoreLevelAdequate), - string(SecureScoreLevelMaximum), - string(SecureScoreLevelMinimum), - string(SecureScoreLevelNone), - } -} - -func (s *SecureScoreLevel) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseSecureScoreLevel(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseSecureScoreLevel(input string) (*SecureScoreLevel, error) { - vals := map[string]SecureScoreLevel{ - "adequate": SecureScoreLevelAdequate, - "maximum": SecureScoreLevelMaximum, - "minimum": SecureScoreLevelMinimum, - "none": SecureScoreLevelNone, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := SecureScoreLevel(input) - return &out, nil -} - -type SkuName string - -const ( - SkuNameRSZero SkuName = "RS0" - SkuNameStandard SkuName = "Standard" -) - -func PossibleValuesForSkuName() []string { - return []string{ - string(SkuNameRSZero), - string(SkuNameStandard), - } -} - -func (s *SkuName) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseSkuName(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseSkuName(input string) (*SkuName, error) { - vals := map[string]SkuName{ - "rs0": 
SkuNameRSZero, - "standard": SkuNameStandard, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := SkuName(input) - return &out, nil -} - -type SoftDeleteState string - -const ( - SoftDeleteStateAlwaysON SoftDeleteState = "AlwaysON" - SoftDeleteStateDisabled SoftDeleteState = "Disabled" - SoftDeleteStateEnabled SoftDeleteState = "Enabled" - SoftDeleteStateInvalid SoftDeleteState = "Invalid" -) - -func PossibleValuesForSoftDeleteState() []string { - return []string{ - string(SoftDeleteStateAlwaysON), - string(SoftDeleteStateDisabled), - string(SoftDeleteStateEnabled), - string(SoftDeleteStateInvalid), - } -} - -func (s *SoftDeleteState) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseSoftDeleteState(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseSoftDeleteState(input string) (*SoftDeleteState, error) { - vals := map[string]SoftDeleteState{ - "alwayson": SoftDeleteStateAlwaysON, - "disabled": SoftDeleteStateDisabled, - "enabled": SoftDeleteStateEnabled, - "invalid": SoftDeleteStateInvalid, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := SoftDeleteState(input) - return &out, nil -} - -type StandardTierStorageRedundancy string - -const ( - StandardTierStorageRedundancyGeoRedundant StandardTierStorageRedundancy = "GeoRedundant" - StandardTierStorageRedundancyInvalid StandardTierStorageRedundancy = "Invalid" - StandardTierStorageRedundancyLocallyRedundant StandardTierStorageRedundancy = "LocallyRedundant" - StandardTierStorageRedundancyZoneRedundant StandardTierStorageRedundancy = "ZoneRedundant" -) - -func PossibleValuesForStandardTierStorageRedundancy() []string 
{ - return []string{ - string(StandardTierStorageRedundancyGeoRedundant), - string(StandardTierStorageRedundancyInvalid), - string(StandardTierStorageRedundancyLocallyRedundant), - string(StandardTierStorageRedundancyZoneRedundant), - } -} - -func (s *StandardTierStorageRedundancy) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseStandardTierStorageRedundancy(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseStandardTierStorageRedundancy(input string) (*StandardTierStorageRedundancy, error) { - vals := map[string]StandardTierStorageRedundancy{ - "georedundant": StandardTierStorageRedundancyGeoRedundant, - "invalid": StandardTierStorageRedundancyInvalid, - "locallyredundant": StandardTierStorageRedundancyLocallyRedundant, - "zoneredundant": StandardTierStorageRedundancyZoneRedundant, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := StandardTierStorageRedundancy(input) - return &out, nil -} - -type State string - -const ( - StateDisabled State = "Disabled" - StateEnabled State = "Enabled" - StateInvalid State = "Invalid" -) - -func PossibleValuesForState() []string { - return []string{ - string(StateDisabled), - string(StateEnabled), - string(StateInvalid), - } -} - -func (s *State) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseState(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseState(input string) (*State, error) { - vals := map[string]State{ - "disabled": StateDisabled, - "enabled": StateEnabled, - "invalid": StateInvalid, - } - if v, ok := 
vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := State(input) - return &out, nil -} - -type TriggerType string - -const ( - TriggerTypeForcedUpgrade TriggerType = "ForcedUpgrade" - TriggerTypeUserTriggered TriggerType = "UserTriggered" -) - -func PossibleValuesForTriggerType() []string { - return []string{ - string(TriggerTypeForcedUpgrade), - string(TriggerTypeUserTriggered), - } -} - -func (s *TriggerType) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseTriggerType(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseTriggerType(input string) (*TriggerType, error) { - vals := map[string]TriggerType{ - "forcedupgrade": TriggerTypeForcedUpgrade, - "usertriggered": TriggerTypeUserTriggered, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := TriggerType(input) - return &out, nil -} - -type UsagesUnit string - -const ( - UsagesUnitBytes UsagesUnit = "Bytes" - UsagesUnitBytesPerSecond UsagesUnit = "BytesPerSecond" - UsagesUnitCount UsagesUnit = "Count" - UsagesUnitCountPerSecond UsagesUnit = "CountPerSecond" - UsagesUnitPercent UsagesUnit = "Percent" - UsagesUnitSeconds UsagesUnit = "Seconds" -) - -func PossibleValuesForUsagesUnit() []string { - return []string{ - string(UsagesUnitBytes), - string(UsagesUnitBytesPerSecond), - string(UsagesUnitCount), - string(UsagesUnitCountPerSecond), - string(UsagesUnitPercent), - string(UsagesUnitSeconds), - } -} - -func (s *UsagesUnit) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseUsagesUnit(decoded) - if err != 
nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseUsagesUnit(input string) (*UsagesUnit, error) { - vals := map[string]UsagesUnit{ - "bytes": UsagesUnitBytes, - "bytespersecond": UsagesUnitBytesPerSecond, - "count": UsagesUnitCount, - "countpersecond": UsagesUnitCountPerSecond, - "percent": UsagesUnitPercent, - "seconds": UsagesUnitSeconds, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := UsagesUnit(input) - return &out, nil -} - -type VaultPrivateEndpointState string - -const ( - VaultPrivateEndpointStateEnabled VaultPrivateEndpointState = "Enabled" - VaultPrivateEndpointStateNone VaultPrivateEndpointState = "None" -) - -func PossibleValuesForVaultPrivateEndpointState() []string { - return []string{ - string(VaultPrivateEndpointStateEnabled), - string(VaultPrivateEndpointStateNone), - } -} - -func (s *VaultPrivateEndpointState) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseVaultPrivateEndpointState(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseVaultPrivateEndpointState(input string) (*VaultPrivateEndpointState, error) { - vals := map[string]VaultPrivateEndpointState{ - "enabled": VaultPrivateEndpointStateEnabled, - "none": VaultPrivateEndpointStateNone, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := VaultPrivateEndpointState(input) - return &out, nil -} - type VaultSubResourceType string const ( @@ -1097,50 +52,3 @@ func parseVaultSubResourceType(input string) (*VaultSubResourceType, error) { out := VaultSubResourceType(input) return &out, nil } - -type VaultUpgradeState string - -const ( - 
VaultUpgradeStateFailed VaultUpgradeState = "Failed" - VaultUpgradeStateInProgress VaultUpgradeState = "InProgress" - VaultUpgradeStateUnknown VaultUpgradeState = "Unknown" - VaultUpgradeStateUpgraded VaultUpgradeState = "Upgraded" -) - -func PossibleValuesForVaultUpgradeState() []string { - return []string{ - string(VaultUpgradeStateFailed), - string(VaultUpgradeStateInProgress), - string(VaultUpgradeStateUnknown), - string(VaultUpgradeStateUpgraded), - } -} - -func (s *VaultUpgradeState) UnmarshalJSON(bytes []byte) error { - var decoded string - if err := json.Unmarshal(bytes, &decoded); err != nil { - return fmt.Errorf("unmarshaling: %+v", err) - } - out, err := parseVaultUpgradeState(decoded) - if err != nil { - return fmt.Errorf("parsing %q: %+v", decoded, err) - } - *s = *out - return nil -} - -func parseVaultUpgradeState(input string) (*VaultUpgradeState, error) { - vals := map[string]VaultUpgradeState{ - "failed": VaultUpgradeStateFailed, - "inprogress": VaultUpgradeStateInProgress, - "unknown": VaultUpgradeStateUnknown, - "upgraded": VaultUpgradeStateUpgraded, - } - if v, ok := vals[strings.ToLower(input)]; ok { - return &v, nil - } - - // otherwise presume it's an undefined value and best-effort it - out := VaultUpgradeState(input) - return &out, nil -} diff --git a/resource-manager/recoveryservices/2025-02-01/vaults/README.md b/resource-manager/recoveryservices/2025-02-01/vaults/README.md new file mode 100644 index 00000000000..9b22b0c0607 --- /dev/null +++ b/resource-manager/recoveryservices/2025-02-01/vaults/README.md @@ -0,0 +1,171 @@ + +## `github.com/hashicorp/go-azure-sdk/resource-manager/recoveryservices/2025-02-01/vaults` Documentation + +The `vaults` SDK allows for interaction with Azure Resource Manager `recoveryservices` (API Version `2025-02-01`). + +This readme covers example usages, but further information on [using this SDK can be found in the project root](https://github.com/hashicorp/go-azure-sdk/tree/main/docs). 
+ +### Import Path + +```go +import "github.com/hashicorp/go-azure-helpers/resourcemanager/commonids" +import "github.com/hashicorp/go-azure-sdk/resource-manager/recoveryservices/2025-02-01/vaults" +``` + + +### Client Initialization + +```go +client := vaults.NewVaultsClientWithBaseURI("https://management.azure.com") +client.Client.Authorizer = authorizer +``` + + +### Example Usage: `VaultsClient.CreateOrUpdate` + +```go +ctx := context.TODO() +id := vaults.NewVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName") + +payload := vaults.Vault{ + // ... +} + + +if err := client.CreateOrUpdateThenPoll(ctx, id, payload, vaults.DefaultCreateOrUpdateOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `VaultsClient.Delete` + +```go +ctx := context.TODO() +id := vaults.NewVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName") + +if err := client.DeleteThenPoll(ctx, id); err != nil { + // handle the error +} +``` + + +### Example Usage: `VaultsClient.Get` + +```go +ctx := context.TODO() +id := vaults.NewVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName") + +read, err := client.Get(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `VaultsClient.ListBySubscriptionId` + +```go +ctx := context.TODO() +id := commonids.NewSubscriptionID("12345678-1234-9876-4563-123456789012") + +// alternatively `client.ListBySubscriptionId(ctx, id)` can be used to do batched pagination +items, err := client.ListBySubscriptionIdComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `VaultsClient.RegisteredIdentitiesDelete` + +```go +ctx := context.TODO() +id := vaults.NewRegisteredIdentityID("12345678-1234-9876-4563-123456789012", "example-resource-group", 
"vaultName", "registeredIdentityName") + +read, err := client.RegisteredIdentitiesDelete(ctx, id) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` + + +### Example Usage: `VaultsClient.ReplicationUsagesList` + +```go +ctx := context.TODO() +id := vaults.NewVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName") + +// alternatively `client.ReplicationUsagesList(ctx, id)` can be used to do batched pagination +items, err := client.ReplicationUsagesListComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `VaultsClient.Update` + +```go +ctx := context.TODO() +id := vaults.NewVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName") + +payload := vaults.PatchVault{ + // ... +} + + +if err := client.UpdateThenPoll(ctx, id, payload, vaults.DefaultUpdateOperationOptions()); err != nil { + // handle the error +} +``` + + +### Example Usage: `VaultsClient.UsagesListByVaults` + +```go +ctx := context.TODO() +id := vaults.NewVaultID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName") + +// alternatively `client.UsagesListByVaults(ctx, id)` can be used to do batched pagination +items, err := client.UsagesListByVaultsComplete(ctx, id) +if err != nil { + // handle the error +} +for _, item := range items { + // do something +} +``` + + +### Example Usage: `VaultsClient.VaultCertificatesCreate` + +```go +ctx := context.TODO() +id := vaults.NewCertificateID("12345678-1234-9876-4563-123456789012", "example-resource-group", "vaultName", "certificateName") + +payload := vaults.CertificateRequest{ + // ... 
+} + + +read, err := client.VaultCertificatesCreate(ctx, id, payload) +if err != nil { + // handle the error +} +if model := read.Model; model != nil { + // do something with the model/response object +} +``` diff --git a/resource-manager/recoveryservices/2025-02-01/vaults/client.go b/resource-manager/recoveryservices/2025-02-01/vaults/client.go new file mode 100644 index 00000000000..37bd7a44670 --- /dev/null +++ b/resource-manager/recoveryservices/2025-02-01/vaults/client.go @@ -0,0 +1,26 @@ +package vaults + +import ( + "fmt" + + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + sdkEnv "github.com/hashicorp/go-azure-sdk/sdk/environments" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type VaultsClient struct { + Client *resourcemanager.Client +} + +func NewVaultsClientWithBaseURI(sdkApi sdkEnv.Api) (*VaultsClient, error) { + client, err := resourcemanager.NewClient(sdkApi, "vaults", defaultApiVersion) + if err != nil { + return nil, fmt.Errorf("instantiating VaultsClient: %+v", err) + } + + return &VaultsClient{ + Client: client, + }, nil +} diff --git a/resource-manager/recoveryservices/2025-02-01/vaults/constants.go b/resource-manager/recoveryservices/2025-02-01/vaults/constants.go new file mode 100644 index 00000000000..d76a6f55ab3 --- /dev/null +++ b/resource-manager/recoveryservices/2025-02-01/vaults/constants.go @@ -0,0 +1,1146 @@ +package vaults + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +type AlertsState string + +const ( + AlertsStateDisabled AlertsState = "Disabled" + AlertsStateEnabled AlertsState = "Enabled" +) + +func PossibleValuesForAlertsState() []string { + return []string{ + string(AlertsStateDisabled), + string(AlertsStateEnabled), + } +} + +func (s *AlertsState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAlertsState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAlertsState(input string) (*AlertsState, error) { + vals := map[string]AlertsState{ + "disabled": AlertsStateDisabled, + "enabled": AlertsStateEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AlertsState(input) + return &out, nil +} + +type AuthType string + +const ( + AuthTypeAAD AuthType = "AAD" + AuthTypeACS AuthType = "ACS" + AuthTypeAccessControlService AuthType = "AccessControlService" + AuthTypeAzureActiveDirectory AuthType = "AzureActiveDirectory" + AuthTypeInvalid AuthType = "Invalid" +) + +func PossibleValuesForAuthType() []string { + return []string{ + string(AuthTypeAAD), + string(AuthTypeACS), + string(AuthTypeAccessControlService), + string(AuthTypeAzureActiveDirectory), + string(AuthTypeInvalid), + } +} + +func (s *AuthType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseAuthType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseAuthType(input string) (*AuthType, error) { + vals := map[string]AuthType{ + "aad": AuthTypeAAD, + "acs": AuthTypeACS, + "accesscontrolservice": AuthTypeAccessControlService, + "azureactivedirectory": 
AuthTypeAzureActiveDirectory, + "invalid": AuthTypeInvalid, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := AuthType(input) + return &out, nil +} + +type BCDRSecurityLevel string + +const ( + BCDRSecurityLevelExcellent BCDRSecurityLevel = "Excellent" + BCDRSecurityLevelFair BCDRSecurityLevel = "Fair" + BCDRSecurityLevelGood BCDRSecurityLevel = "Good" + BCDRSecurityLevelPoor BCDRSecurityLevel = "Poor" +) + +func PossibleValuesForBCDRSecurityLevel() []string { + return []string{ + string(BCDRSecurityLevelExcellent), + string(BCDRSecurityLevelFair), + string(BCDRSecurityLevelGood), + string(BCDRSecurityLevelPoor), + } +} + +func (s *BCDRSecurityLevel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBCDRSecurityLevel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBCDRSecurityLevel(input string) (*BCDRSecurityLevel, error) { + vals := map[string]BCDRSecurityLevel{ + "excellent": BCDRSecurityLevelExcellent, + "fair": BCDRSecurityLevelFair, + "good": BCDRSecurityLevelGood, + "poor": BCDRSecurityLevelPoor, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BCDRSecurityLevel(input) + return &out, nil +} + +type BackupStorageVersion string + +const ( + BackupStorageVersionUnassigned BackupStorageVersion = "Unassigned" + BackupStorageVersionVOne BackupStorageVersion = "V1" + BackupStorageVersionVTwo BackupStorageVersion = "V2" +) + +func PossibleValuesForBackupStorageVersion() []string { + return []string{ + string(BackupStorageVersionUnassigned), + string(BackupStorageVersionVOne), + string(BackupStorageVersionVTwo), + } +} + +func (s *BackupStorageVersion) 
UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseBackupStorageVersion(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseBackupStorageVersion(input string) (*BackupStorageVersion, error) { + vals := map[string]BackupStorageVersion{ + "unassigned": BackupStorageVersionUnassigned, + "v1": BackupStorageVersionVOne, + "v2": BackupStorageVersionVTwo, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := BackupStorageVersion(input) + return &out, nil +} + +type CrossRegionRestore string + +const ( + CrossRegionRestoreDisabled CrossRegionRestore = "Disabled" + CrossRegionRestoreEnabled CrossRegionRestore = "Enabled" +) + +func PossibleValuesForCrossRegionRestore() []string { + return []string{ + string(CrossRegionRestoreDisabled), + string(CrossRegionRestoreEnabled), + } +} + +func (s *CrossRegionRestore) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCrossRegionRestore(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCrossRegionRestore(input string) (*CrossRegionRestore, error) { + vals := map[string]CrossRegionRestore{ + "disabled": CrossRegionRestoreDisabled, + "enabled": CrossRegionRestoreEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CrossRegionRestore(input) + return &out, nil +} + +type CrossSubscriptionRestoreState string + +const ( + CrossSubscriptionRestoreStateDisabled CrossSubscriptionRestoreState = "Disabled" + 
CrossSubscriptionRestoreStateEnabled CrossSubscriptionRestoreState = "Enabled" + CrossSubscriptionRestoreStatePermanentlyDisabled CrossSubscriptionRestoreState = "PermanentlyDisabled" +) + +func PossibleValuesForCrossSubscriptionRestoreState() []string { + return []string{ + string(CrossSubscriptionRestoreStateDisabled), + string(CrossSubscriptionRestoreStateEnabled), + string(CrossSubscriptionRestoreStatePermanentlyDisabled), + } +} + +func (s *CrossSubscriptionRestoreState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseCrossSubscriptionRestoreState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseCrossSubscriptionRestoreState(input string) (*CrossSubscriptionRestoreState, error) { + vals := map[string]CrossSubscriptionRestoreState{ + "disabled": CrossSubscriptionRestoreStateDisabled, + "enabled": CrossSubscriptionRestoreStateEnabled, + "permanentlydisabled": CrossSubscriptionRestoreStatePermanentlyDisabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := CrossSubscriptionRestoreState(input) + return &out, nil +} + +type EnhancedSecurityState string + +const ( + EnhancedSecurityStateAlwaysON EnhancedSecurityState = "AlwaysON" + EnhancedSecurityStateDisabled EnhancedSecurityState = "Disabled" + EnhancedSecurityStateEnabled EnhancedSecurityState = "Enabled" + EnhancedSecurityStateInvalid EnhancedSecurityState = "Invalid" +) + +func PossibleValuesForEnhancedSecurityState() []string { + return []string{ + string(EnhancedSecurityStateAlwaysON), + string(EnhancedSecurityStateDisabled), + string(EnhancedSecurityStateEnabled), + string(EnhancedSecurityStateInvalid), + } +} + +func (s *EnhancedSecurityState) UnmarshalJSON(bytes []byte) error { + var decoded 
string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseEnhancedSecurityState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseEnhancedSecurityState(input string) (*EnhancedSecurityState, error) { + vals := map[string]EnhancedSecurityState{ + "alwayson": EnhancedSecurityStateAlwaysON, + "disabled": EnhancedSecurityStateDisabled, + "enabled": EnhancedSecurityStateEnabled, + "invalid": EnhancedSecurityStateInvalid, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := EnhancedSecurityState(input) + return &out, nil +} + +type IdentityType string + +const ( + IdentityTypeSystemAssigned IdentityType = "SystemAssigned" + IdentityTypeUserAssigned IdentityType = "UserAssigned" +) + +func PossibleValuesForIdentityType() []string { + return []string{ + string(IdentityTypeSystemAssigned), + string(IdentityTypeUserAssigned), + } +} + +func (s *IdentityType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseIdentityType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseIdentityType(input string) (*IdentityType, error) { + vals := map[string]IdentityType{ + "systemassigned": IdentityTypeSystemAssigned, + "userassigned": IdentityTypeUserAssigned, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := IdentityType(input) + return &out, nil +} + +type ImmutabilityState string + +const ( + ImmutabilityStateDisabled ImmutabilityState = "Disabled" + ImmutabilityStateLocked ImmutabilityState = "Locked" + ImmutabilityStateUnlocked 
ImmutabilityState = "Unlocked" +) + +func PossibleValuesForImmutabilityState() []string { + return []string{ + string(ImmutabilityStateDisabled), + string(ImmutabilityStateLocked), + string(ImmutabilityStateUnlocked), + } +} + +func (s *ImmutabilityState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseImmutabilityState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseImmutabilityState(input string) (*ImmutabilityState, error) { + vals := map[string]ImmutabilityState{ + "disabled": ImmutabilityStateDisabled, + "locked": ImmutabilityStateLocked, + "unlocked": ImmutabilityStateUnlocked, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ImmutabilityState(input) + return &out, nil +} + +type InfrastructureEncryptionState string + +const ( + InfrastructureEncryptionStateDisabled InfrastructureEncryptionState = "Disabled" + InfrastructureEncryptionStateEnabled InfrastructureEncryptionState = "Enabled" +) + +func PossibleValuesForInfrastructureEncryptionState() []string { + return []string{ + string(InfrastructureEncryptionStateDisabled), + string(InfrastructureEncryptionStateEnabled), + } +} + +func (s *InfrastructureEncryptionState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseInfrastructureEncryptionState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseInfrastructureEncryptionState(input string) (*InfrastructureEncryptionState, error) { + vals := map[string]InfrastructureEncryptionState{ + "disabled": InfrastructureEncryptionStateDisabled, + 
"enabled": InfrastructureEncryptionStateEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := InfrastructureEncryptionState(input) + return &out, nil +} + +type MultiUserAuthorization string + +const ( + MultiUserAuthorizationDisabled MultiUserAuthorization = "Disabled" + MultiUserAuthorizationEnabled MultiUserAuthorization = "Enabled" + MultiUserAuthorizationInvalid MultiUserAuthorization = "Invalid" +) + +func PossibleValuesForMultiUserAuthorization() []string { + return []string{ + string(MultiUserAuthorizationDisabled), + string(MultiUserAuthorizationEnabled), + string(MultiUserAuthorizationInvalid), + } +} + +func (s *MultiUserAuthorization) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseMultiUserAuthorization(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseMultiUserAuthorization(input string) (*MultiUserAuthorization, error) { + vals := map[string]MultiUserAuthorization{ + "disabled": MultiUserAuthorizationDisabled, + "enabled": MultiUserAuthorizationEnabled, + "invalid": MultiUserAuthorizationInvalid, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := MultiUserAuthorization(input) + return &out, nil +} + +type PrivateEndpointConnectionStatus string + +const ( + PrivateEndpointConnectionStatusApproved PrivateEndpointConnectionStatus = "Approved" + PrivateEndpointConnectionStatusDisconnected PrivateEndpointConnectionStatus = "Disconnected" + PrivateEndpointConnectionStatusPending PrivateEndpointConnectionStatus = "Pending" + PrivateEndpointConnectionStatusRejected PrivateEndpointConnectionStatus = "Rejected" +) + +func 
PossibleValuesForPrivateEndpointConnectionStatus() []string { + return []string{ + string(PrivateEndpointConnectionStatusApproved), + string(PrivateEndpointConnectionStatusDisconnected), + string(PrivateEndpointConnectionStatusPending), + string(PrivateEndpointConnectionStatusRejected), + } +} + +func (s *PrivateEndpointConnectionStatus) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parsePrivateEndpointConnectionStatus(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parsePrivateEndpointConnectionStatus(input string) (*PrivateEndpointConnectionStatus, error) { + vals := map[string]PrivateEndpointConnectionStatus{ + "approved": PrivateEndpointConnectionStatusApproved, + "disconnected": PrivateEndpointConnectionStatusDisconnected, + "pending": PrivateEndpointConnectionStatusPending, + "rejected": PrivateEndpointConnectionStatusRejected, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := PrivateEndpointConnectionStatus(input) + return &out, nil +} + +type ProvisioningState string + +const ( + ProvisioningStateDeleting ProvisioningState = "Deleting" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStatePending ProvisioningState = "Pending" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" +) + +func PossibleValuesForProvisioningState() []string { + return []string{ + string(ProvisioningStateDeleting), + string(ProvisioningStateFailed), + string(ProvisioningStatePending), + string(ProvisioningStateSucceeded), + } +} + +func (s *ProvisioningState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := 
parseProvisioningState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseProvisioningState(input string) (*ProvisioningState, error) { + vals := map[string]ProvisioningState{ + "deleting": ProvisioningStateDeleting, + "failed": ProvisioningStateFailed, + "pending": ProvisioningStatePending, + "succeeded": ProvisioningStateSucceeded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := ProvisioningState(input) + return &out, nil +} + +type PublicNetworkAccess string + +const ( + PublicNetworkAccessDisabled PublicNetworkAccess = "Disabled" + PublicNetworkAccessEnabled PublicNetworkAccess = "Enabled" +) + +func PossibleValuesForPublicNetworkAccess() []string { + return []string{ + string(PublicNetworkAccessDisabled), + string(PublicNetworkAccessEnabled), + } +} + +func (s *PublicNetworkAccess) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parsePublicNetworkAccess(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parsePublicNetworkAccess(input string) (*PublicNetworkAccess, error) { + vals := map[string]PublicNetworkAccess{ + "disabled": PublicNetworkAccessDisabled, + "enabled": PublicNetworkAccessEnabled, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := PublicNetworkAccess(input) + return &out, nil +} + +type ResourceMoveState string + +const ( + ResourceMoveStateCommitFailed ResourceMoveState = "CommitFailed" + ResourceMoveStateCommitTimedout ResourceMoveState = "CommitTimedout" + ResourceMoveStateCriticalFailure ResourceMoveState = "CriticalFailure" + ResourceMoveStateFailure ResourceMoveState = 
"Failure" + ResourceMoveStateInProgress ResourceMoveState = "InProgress" + ResourceMoveStateMoveSucceeded ResourceMoveState = "MoveSucceeded" + ResourceMoveStatePartialSuccess ResourceMoveState = "PartialSuccess" + ResourceMoveStatePrepareFailed ResourceMoveState = "PrepareFailed" + ResourceMoveStatePrepareTimedout ResourceMoveState = "PrepareTimedout" + ResourceMoveStateUnknown ResourceMoveState = "Unknown" +) + +func PossibleValuesForResourceMoveState() []string { + return []string{ + string(ResourceMoveStateCommitFailed), + string(ResourceMoveStateCommitTimedout), + string(ResourceMoveStateCriticalFailure), + string(ResourceMoveStateFailure), + string(ResourceMoveStateInProgress), + string(ResourceMoveStateMoveSucceeded), + string(ResourceMoveStatePartialSuccess), + string(ResourceMoveStatePrepareFailed), + string(ResourceMoveStatePrepareTimedout), + string(ResourceMoveStateUnknown), + } +} + +func (s *ResourceMoveState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseResourceMoveState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseResourceMoveState(input string) (*ResourceMoveState, error) { + vals := map[string]ResourceMoveState{ + "commitfailed": ResourceMoveStateCommitFailed, + "committimedout": ResourceMoveStateCommitTimedout, + "criticalfailure": ResourceMoveStateCriticalFailure, + "failure": ResourceMoveStateFailure, + "inprogress": ResourceMoveStateInProgress, + "movesucceeded": ResourceMoveStateMoveSucceeded, + "partialsuccess": ResourceMoveStatePartialSuccess, + "preparefailed": ResourceMoveStatePrepareFailed, + "preparetimedout": ResourceMoveStatePrepareTimedout, + "unknown": ResourceMoveStateUnknown, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it 
+ out := ResourceMoveState(input) + return &out, nil +} + +type SecureScoreLevel string + +const ( + SecureScoreLevelAdequate SecureScoreLevel = "Adequate" + SecureScoreLevelMaximum SecureScoreLevel = "Maximum" + SecureScoreLevelMinimum SecureScoreLevel = "Minimum" + SecureScoreLevelNone SecureScoreLevel = "None" +) + +func PossibleValuesForSecureScoreLevel() []string { + return []string{ + string(SecureScoreLevelAdequate), + string(SecureScoreLevelMaximum), + string(SecureScoreLevelMinimum), + string(SecureScoreLevelNone), + } +} + +func (s *SecureScoreLevel) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSecureScoreLevel(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSecureScoreLevel(input string) (*SecureScoreLevel, error) { + vals := map[string]SecureScoreLevel{ + "adequate": SecureScoreLevelAdequate, + "maximum": SecureScoreLevelMaximum, + "minimum": SecureScoreLevelMinimum, + "none": SecureScoreLevelNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SecureScoreLevel(input) + return &out, nil +} + +type SkuName string + +const ( + SkuNameRSZero SkuName = "RS0" + SkuNameStandard SkuName = "Standard" +) + +func PossibleValuesForSkuName() []string { + return []string{ + string(SkuNameRSZero), + string(SkuNameStandard), + } +} + +func (s *SkuName) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSkuName(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSkuName(input string) (*SkuName, error) { + vals := map[string]SkuName{ + "rs0": 
SkuNameRSZero, + "standard": SkuNameStandard, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SkuName(input) + return &out, nil +} + +type SoftDeleteState string + +const ( + SoftDeleteStateAlwaysON SoftDeleteState = "AlwaysON" + SoftDeleteStateDisabled SoftDeleteState = "Disabled" + SoftDeleteStateEnabled SoftDeleteState = "Enabled" + SoftDeleteStateInvalid SoftDeleteState = "Invalid" +) + +func PossibleValuesForSoftDeleteState() []string { + return []string{ + string(SoftDeleteStateAlwaysON), + string(SoftDeleteStateDisabled), + string(SoftDeleteStateEnabled), + string(SoftDeleteStateInvalid), + } +} + +func (s *SoftDeleteState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseSoftDeleteState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseSoftDeleteState(input string) (*SoftDeleteState, error) { + vals := map[string]SoftDeleteState{ + "alwayson": SoftDeleteStateAlwaysON, + "disabled": SoftDeleteStateDisabled, + "enabled": SoftDeleteStateEnabled, + "invalid": SoftDeleteStateInvalid, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := SoftDeleteState(input) + return &out, nil +} + +type StandardTierStorageRedundancy string + +const ( + StandardTierStorageRedundancyGeoRedundant StandardTierStorageRedundancy = "GeoRedundant" + StandardTierStorageRedundancyInvalid StandardTierStorageRedundancy = "Invalid" + StandardTierStorageRedundancyLocallyRedundant StandardTierStorageRedundancy = "LocallyRedundant" + StandardTierStorageRedundancyZoneRedundant StandardTierStorageRedundancy = "ZoneRedundant" +) + +func PossibleValuesForStandardTierStorageRedundancy() []string 
{ + return []string{ + string(StandardTierStorageRedundancyGeoRedundant), + string(StandardTierStorageRedundancyInvalid), + string(StandardTierStorageRedundancyLocallyRedundant), + string(StandardTierStorageRedundancyZoneRedundant), + } +} + +func (s *StandardTierStorageRedundancy) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseStandardTierStorageRedundancy(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseStandardTierStorageRedundancy(input string) (*StandardTierStorageRedundancy, error) { + vals := map[string]StandardTierStorageRedundancy{ + "georedundant": StandardTierStorageRedundancyGeoRedundant, + "invalid": StandardTierStorageRedundancyInvalid, + "locallyredundant": StandardTierStorageRedundancyLocallyRedundant, + "zoneredundant": StandardTierStorageRedundancyZoneRedundant, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := StandardTierStorageRedundancy(input) + return &out, nil +} + +type State string + +const ( + StateDisabled State = "Disabled" + StateEnabled State = "Enabled" + StateInvalid State = "Invalid" +) + +func PossibleValuesForState() []string { + return []string{ + string(StateDisabled), + string(StateEnabled), + string(StateInvalid), + } +} + +func (s *State) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseState(input string) (*State, error) { + vals := map[string]State{ + "disabled": StateDisabled, + "enabled": StateEnabled, + "invalid": StateInvalid, + } + if v, ok := 
vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := State(input) + return &out, nil +} + +type TriggerType string + +const ( + TriggerTypeForcedUpgrade TriggerType = "ForcedUpgrade" + TriggerTypeUserTriggered TriggerType = "UserTriggered" +) + +func PossibleValuesForTriggerType() []string { + return []string{ + string(TriggerTypeForcedUpgrade), + string(TriggerTypeUserTriggered), + } +} + +func (s *TriggerType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseTriggerType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseTriggerType(input string) (*TriggerType, error) { + vals := map[string]TriggerType{ + "forcedupgrade": TriggerTypeForcedUpgrade, + "usertriggered": TriggerTypeUserTriggered, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := TriggerType(input) + return &out, nil +} + +type UsagesUnit string + +const ( + UsagesUnitBytes UsagesUnit = "Bytes" + UsagesUnitBytesPerSecond UsagesUnit = "BytesPerSecond" + UsagesUnitCount UsagesUnit = "Count" + UsagesUnitCountPerSecond UsagesUnit = "CountPerSecond" + UsagesUnitPercent UsagesUnit = "Percent" + UsagesUnitSeconds UsagesUnit = "Seconds" +) + +func PossibleValuesForUsagesUnit() []string { + return []string{ + string(UsagesUnitBytes), + string(UsagesUnitBytesPerSecond), + string(UsagesUnitCount), + string(UsagesUnitCountPerSecond), + string(UsagesUnitPercent), + string(UsagesUnitSeconds), + } +} + +func (s *UsagesUnit) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseUsagesUnit(decoded) + if err != 
nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseUsagesUnit(input string) (*UsagesUnit, error) { + vals := map[string]UsagesUnit{ + "bytes": UsagesUnitBytes, + "bytespersecond": UsagesUnitBytesPerSecond, + "count": UsagesUnitCount, + "countpersecond": UsagesUnitCountPerSecond, + "percent": UsagesUnitPercent, + "seconds": UsagesUnitSeconds, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := UsagesUnit(input) + return &out, nil +} + +type VaultPrivateEndpointState string + +const ( + VaultPrivateEndpointStateEnabled VaultPrivateEndpointState = "Enabled" + VaultPrivateEndpointStateNone VaultPrivateEndpointState = "None" +) + +func PossibleValuesForVaultPrivateEndpointState() []string { + return []string{ + string(VaultPrivateEndpointStateEnabled), + string(VaultPrivateEndpointStateNone), + } +} + +func (s *VaultPrivateEndpointState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseVaultPrivateEndpointState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseVaultPrivateEndpointState(input string) (*VaultPrivateEndpointState, error) { + vals := map[string]VaultPrivateEndpointState{ + "enabled": VaultPrivateEndpointStateEnabled, + "none": VaultPrivateEndpointStateNone, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := VaultPrivateEndpointState(input) + return &out, nil +} + +type VaultSubResourceType string + +const ( + VaultSubResourceTypeAzureBackup VaultSubResourceType = "AzureBackup" + VaultSubResourceTypeAzureBackupSecondary VaultSubResourceType = "AzureBackup_secondary" + VaultSubResourceTypeAzureSiteRecovery 
VaultSubResourceType = "AzureSiteRecovery" +) + +func PossibleValuesForVaultSubResourceType() []string { + return []string{ + string(VaultSubResourceTypeAzureBackup), + string(VaultSubResourceTypeAzureBackupSecondary), + string(VaultSubResourceTypeAzureSiteRecovery), + } +} + +func (s *VaultSubResourceType) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseVaultSubResourceType(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func parseVaultSubResourceType(input string) (*VaultSubResourceType, error) { + vals := map[string]VaultSubResourceType{ + "azurebackup": VaultSubResourceTypeAzureBackup, + "azurebackup_secondary": VaultSubResourceTypeAzureBackupSecondary, + "azuresiterecovery": VaultSubResourceTypeAzureSiteRecovery, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := VaultSubResourceType(input) + return &out, nil +} + +type VaultUpgradeState string + +const ( + VaultUpgradeStateFailed VaultUpgradeState = "Failed" + VaultUpgradeStateInProgress VaultUpgradeState = "InProgress" + VaultUpgradeStateUnknown VaultUpgradeState = "Unknown" + VaultUpgradeStateUpgraded VaultUpgradeState = "Upgraded" +) + +func PossibleValuesForVaultUpgradeState() []string { + return []string{ + string(VaultUpgradeStateFailed), + string(VaultUpgradeStateInProgress), + string(VaultUpgradeStateUnknown), + string(VaultUpgradeStateUpgraded), + } +} + +func (s *VaultUpgradeState) UnmarshalJSON(bytes []byte) error { + var decoded string + if err := json.Unmarshal(bytes, &decoded); err != nil { + return fmt.Errorf("unmarshaling: %+v", err) + } + out, err := parseVaultUpgradeState(decoded) + if err != nil { + return fmt.Errorf("parsing %q: %+v", decoded, err) + } + *s = *out + return nil +} + +func 
parseVaultUpgradeState(input string) (*VaultUpgradeState, error) { + vals := map[string]VaultUpgradeState{ + "failed": VaultUpgradeStateFailed, + "inprogress": VaultUpgradeStateInProgress, + "unknown": VaultUpgradeStateUnknown, + "upgraded": VaultUpgradeStateUpgraded, + } + if v, ok := vals[strings.ToLower(input)]; ok { + return &v, nil + } + + // otherwise presume it's an undefined value and best-effort it + out := VaultUpgradeState(input) + return &out, nil +} diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/id_certificate.go b/resource-manager/recoveryservices/2025-02-01/vaults/id_certificate.go similarity index 99% rename from resource-manager/recoveryservices/2025-02-01/openapis/id_certificate.go rename to resource-manager/recoveryservices/2025-02-01/vaults/id_certificate.go index 93c9788e197..111607329c3 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/id_certificate.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/id_certificate.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "fmt" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/id_certificate_test.go b/resource-manager/recoveryservices/2025-02-01/vaults/id_certificate_test.go similarity index 99% rename from resource-manager/recoveryservices/2025-02-01/openapis/id_certificate_test.go rename to resource-manager/recoveryservices/2025-02-01/vaults/id_certificate_test.go index c1c75931b0b..1d016880229 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/id_certificate_test.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/id_certificate_test.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "testing" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/id_registeredidentity.go b/resource-manager/recoveryservices/2025-02-01/vaults/id_registeredidentity.go similarity index 99% rename from resource-manager/recoveryservices/2025-02-01/openapis/id_registeredidentity.go rename to 
resource-manager/recoveryservices/2025-02-01/vaults/id_registeredidentity.go index db976b6a8e0..724ca711126 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/id_registeredidentity.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/id_registeredidentity.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "fmt" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/id_registeredidentity_test.go b/resource-manager/recoveryservices/2025-02-01/vaults/id_registeredidentity_test.go similarity index 99% rename from resource-manager/recoveryservices/2025-02-01/openapis/id_registeredidentity_test.go rename to resource-manager/recoveryservices/2025-02-01/vaults/id_registeredidentity_test.go index abf969bbc22..6ef346dd15e 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/id_registeredidentity_test.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/id_registeredidentity_test.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "testing" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/id_vault.go b/resource-manager/recoveryservices/2025-02-01/vaults/id_vault.go similarity index 99% rename from resource-manager/recoveryservices/2025-02-01/openapis/id_vault.go rename to resource-manager/recoveryservices/2025-02-01/vaults/id_vault.go index 41acdedc0be..9c23a25bcb7 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/id_vault.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/id_vault.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "fmt" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/id_vault_test.go b/resource-manager/recoveryservices/2025-02-01/vaults/id_vault_test.go similarity index 99% rename from resource-manager/recoveryservices/2025-02-01/openapis/id_vault_test.go rename to resource-manager/recoveryservices/2025-02-01/vaults/id_vault_test.go index 4229bb2b124..0641240427b 100644 --- 
a/resource-manager/recoveryservices/2025-02-01/openapis/id_vault_test.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/id_vault_test.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "testing" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/method_vaultscreateorupdate.go b/resource-manager/recoveryservices/2025-02-01/vaults/method_createorupdate.go similarity index 58% rename from resource-manager/recoveryservices/2025-02-01/openapis/method_vaultscreateorupdate.go rename to resource-manager/recoveryservices/2025-02-01/vaults/method_createorupdate.go index cce43ed1dc3..f52787d1b5a 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/method_vaultscreateorupdate.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/method_createorupdate.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "context" @@ -14,22 +14,22 @@ import ( // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
-type VaultsCreateOrUpdateOperationResponse struct { +type CreateOrUpdateOperationResponse struct { Poller pollers.Poller HttpResponse *http.Response OData *odata.OData Model *Vault } -type VaultsCreateOrUpdateOperationOptions struct { +type CreateOrUpdateOperationOptions struct { XMsAuthorizationAuxiliary *string } -func DefaultVaultsCreateOrUpdateOperationOptions() VaultsCreateOrUpdateOperationOptions { - return VaultsCreateOrUpdateOperationOptions{} +func DefaultCreateOrUpdateOperationOptions() CreateOrUpdateOperationOptions { + return CreateOrUpdateOperationOptions{} } -func (o VaultsCreateOrUpdateOperationOptions) ToHeaders() *client.Headers { +func (o CreateOrUpdateOperationOptions) ToHeaders() *client.Headers { out := client.Headers{} if o.XMsAuthorizationAuxiliary != nil { out.Append("x-ms-authorization-auxiliary", fmt.Sprintf("%v", *o.XMsAuthorizationAuxiliary)) @@ -37,20 +37,20 @@ func (o VaultsCreateOrUpdateOperationOptions) ToHeaders() *client.Headers { return &out } -func (o VaultsCreateOrUpdateOperationOptions) ToOData() *odata.Query { +func (o CreateOrUpdateOperationOptions) ToOData() *odata.Query { out := odata.Query{} return &out } -func (o VaultsCreateOrUpdateOperationOptions) ToQuery() *client.QueryParams { +func (o CreateOrUpdateOperationOptions) ToQuery() *client.QueryParams { out := client.QueryParams{} return &out } -// VaultsCreateOrUpdate ... -func (c OpenapisClient) VaultsCreateOrUpdate(ctx context.Context, id VaultId, input Vault, options VaultsCreateOrUpdateOperationOptions) (result VaultsCreateOrUpdateOperationResponse, err error) { +// CreateOrUpdate ... 
+func (c VaultsClient) CreateOrUpdate(ctx context.Context, id VaultId, input Vault, options CreateOrUpdateOperationOptions) (result CreateOrUpdateOperationResponse, err error) { opts := client.RequestOptions{ ContentType: "application/json; charset=utf-8", ExpectedStatusCodes: []int{ @@ -89,15 +89,15 @@ func (c OpenapisClient) VaultsCreateOrUpdate(ctx context.Context, id VaultId, in return } -// VaultsCreateOrUpdateThenPoll performs VaultsCreateOrUpdate then polls until it's completed -func (c OpenapisClient) VaultsCreateOrUpdateThenPoll(ctx context.Context, id VaultId, input Vault, options VaultsCreateOrUpdateOperationOptions) error { - result, err := c.VaultsCreateOrUpdate(ctx, id, input, options) +// CreateOrUpdateThenPoll performs CreateOrUpdate then polls until it's completed +func (c VaultsClient) CreateOrUpdateThenPoll(ctx context.Context, id VaultId, input Vault, options CreateOrUpdateOperationOptions) error { + result, err := c.CreateOrUpdate(ctx, id, input, options) if err != nil { - return fmt.Errorf("performing VaultsCreateOrUpdate: %+v", err) + return fmt.Errorf("performing CreateOrUpdate: %+v", err) } if err := result.Poller.PollUntilDone(ctx); err != nil { - return fmt.Errorf("polling after VaultsCreateOrUpdate: %+v", err) + return fmt.Errorf("polling after CreateOrUpdate: %+v", err) } return nil diff --git a/resource-manager/recoveryservices/2025-02-01/vaults/method_delete.go b/resource-manager/recoveryservices/2025-02-01/vaults/method_delete.go new file mode 100644 index 00000000000..2b8f5be41af --- /dev/null +++ b/resource-manager/recoveryservices/2025-02-01/vaults/method_delete.go @@ -0,0 +1,70 @@ +package vaults + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-azure-sdk/sdk/client" + "github.com/hashicorp/go-azure-sdk/sdk/client/pollers" + "github.com/hashicorp/go-azure-sdk/sdk/client/resourcemanager" + "github.com/hashicorp/go-azure-sdk/sdk/odata" +) + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type DeleteOperationResponse struct { + Poller pollers.Poller + HttpResponse *http.Response + OData *odata.OData +} + +// Delete ... +func (c VaultsClient) Delete(ctx context.Context, id VaultId) (result DeleteOperationResponse, err error) { + opts := client.RequestOptions{ + ContentType: "application/json; charset=utf-8", + ExpectedStatusCodes: []int{ + http.StatusAccepted, + http.StatusNoContent, + }, + HttpMethod: http.MethodDelete, + Path: id.ID(), + } + + req, err := c.Client.NewRequest(ctx, opts) + if err != nil { + return + } + + var resp *client.Response + resp, err = req.Execute(ctx) + if resp != nil { + result.OData = resp.OData + result.HttpResponse = resp.Response + } + if err != nil { + return + } + + result.Poller, err = resourcemanager.PollerFromResponse(resp, c.Client) + if err != nil { + return + } + + return +} + +// DeleteThenPoll performs Delete then polls until it's completed +func (c VaultsClient) DeleteThenPoll(ctx context.Context, id VaultId) error { + result, err := c.Delete(ctx, id) + if err != nil { + return fmt.Errorf("performing Delete: %+v", err) + } + + if err := result.Poller.PollUntilDone(ctx); err != nil { + return fmt.Errorf("polling after Delete: %+v", err) + } + + return nil +} diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/method_vaultsget.go b/resource-manager/recoveryservices/2025-02-01/vaults/method_get.go similarity index 82% rename from resource-manager/recoveryservices/2025-02-01/openapis/method_vaultsget.go rename to resource-manager/recoveryservices/2025-02-01/vaults/method_get.go index 9cc2cedfa35..64a64f258ff 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/method_vaultsget.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/method_get.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "context" @@ -11,14 +11,14 @@ import ( // Copyright (c) Microsoft Corporation. 
All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. -type VaultsGetOperationResponse struct { +type GetOperationResponse struct { HttpResponse *http.Response OData *odata.OData Model *Vault } -// VaultsGet ... -func (c OpenapisClient) VaultsGet(ctx context.Context, id VaultId) (result VaultsGetOperationResponse, err error) { +// Get ... +func (c VaultsClient) Get(ctx context.Context, id VaultId) (result GetOperationResponse, err error) { opts := client.RequestOptions{ ContentType: "application/json; charset=utf-8", ExpectedStatusCodes: []int{ diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/method_vaultslistbysubscriptionid.go b/resource-manager/recoveryservices/2025-02-01/vaults/method_listbysubscriptionid.go similarity index 55% rename from resource-manager/recoveryservices/2025-02-01/openapis/method_vaultslistbysubscriptionid.go rename to resource-manager/recoveryservices/2025-02-01/vaults/method_listbysubscriptionid.go index b0103ced71c..c289dedd82f 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/method_vaultslistbysubscriptionid.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/method_listbysubscriptionid.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "context" @@ -13,22 +13,22 @@ import ( // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
-type VaultsListBySubscriptionIdOperationResponse struct { +type ListBySubscriptionIdOperationResponse struct { HttpResponse *http.Response OData *odata.OData Model *[]Vault } -type VaultsListBySubscriptionIdCompleteResult struct { +type ListBySubscriptionIdCompleteResult struct { LatestHttpResponse *http.Response Items []Vault } -type VaultsListBySubscriptionIdCustomPager struct { +type ListBySubscriptionIdCustomPager struct { NextLink *odata.Link `json:"nextLink"` } -func (p *VaultsListBySubscriptionIdCustomPager) NextPageLink() *odata.Link { +func (p *ListBySubscriptionIdCustomPager) NextPageLink() *odata.Link { defer func() { p.NextLink = nil }() @@ -36,15 +36,15 @@ func (p *VaultsListBySubscriptionIdCustomPager) NextPageLink() *odata.Link { return p.NextLink } -// VaultsListBySubscriptionId ... -func (c OpenapisClient) VaultsListBySubscriptionId(ctx context.Context, id commonids.SubscriptionId) (result VaultsListBySubscriptionIdOperationResponse, err error) { +// ListBySubscriptionId ... 
+func (c VaultsClient) ListBySubscriptionId(ctx context.Context, id commonids.SubscriptionId) (result ListBySubscriptionIdOperationResponse, err error) { opts := client.RequestOptions{ ContentType: "application/json; charset=utf-8", ExpectedStatusCodes: []int{ http.StatusOK, }, HttpMethod: http.MethodGet, - Pager: &VaultsListBySubscriptionIdCustomPager{}, + Pager: &ListBySubscriptionIdCustomPager{}, Path: fmt.Sprintf("%s/providers/Microsoft.RecoveryServices/vaults", id.ID()), } @@ -75,16 +75,16 @@ func (c OpenapisClient) VaultsListBySubscriptionId(ctx context.Context, id commo return } -// VaultsListBySubscriptionIdComplete retrieves all the results into a single object -func (c OpenapisClient) VaultsListBySubscriptionIdComplete(ctx context.Context, id commonids.SubscriptionId) (VaultsListBySubscriptionIdCompleteResult, error) { - return c.VaultsListBySubscriptionIdCompleteMatchingPredicate(ctx, id, VaultOperationPredicate{}) +// ListBySubscriptionIdComplete retrieves all the results into a single object +func (c VaultsClient) ListBySubscriptionIdComplete(ctx context.Context, id commonids.SubscriptionId) (ListBySubscriptionIdCompleteResult, error) { + return c.ListBySubscriptionIdCompleteMatchingPredicate(ctx, id, VaultOperationPredicate{}) } -// VaultsListBySubscriptionIdCompleteMatchingPredicate retrieves all the results and then applies the predicate -func (c OpenapisClient) VaultsListBySubscriptionIdCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate VaultOperationPredicate) (result VaultsListBySubscriptionIdCompleteResult, err error) { +// ListBySubscriptionIdCompleteMatchingPredicate retrieves all the results and then applies the predicate +func (c VaultsClient) ListBySubscriptionIdCompleteMatchingPredicate(ctx context.Context, id commonids.SubscriptionId, predicate VaultOperationPredicate) (result ListBySubscriptionIdCompleteResult, err error) { items := make([]Vault, 0) - resp, err := c.VaultsListBySubscriptionId(ctx, id) + 
resp, err := c.ListBySubscriptionId(ctx, id) if err != nil { result.LatestHttpResponse = resp.HttpResponse err = fmt.Errorf("loading results: %+v", err) @@ -98,7 +98,7 @@ func (c OpenapisClient) VaultsListBySubscriptionIdCompleteMatchingPredicate(ctx } } - result = VaultsListBySubscriptionIdCompleteResult{ + result = ListBySubscriptionIdCompleteResult{ LatestHttpResponse: resp.HttpResponse, Items: items, } diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/method_registeredidentitiesdelete.go b/resource-manager/recoveryservices/2025-02-01/vaults/method_registeredidentitiesdelete.go similarity index 83% rename from resource-manager/recoveryservices/2025-02-01/openapis/method_registeredidentitiesdelete.go rename to resource-manager/recoveryservices/2025-02-01/vaults/method_registeredidentitiesdelete.go index 3bf2a951f64..2b4b12a6588 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/method_registeredidentitiesdelete.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/method_registeredidentitiesdelete.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "context" @@ -17,7 +17,7 @@ type RegisteredIdentitiesDeleteOperationResponse struct { } // RegisteredIdentitiesDelete ... 
-func (c OpenapisClient) RegisteredIdentitiesDelete(ctx context.Context, id RegisteredIdentityId) (result RegisteredIdentitiesDeleteOperationResponse, err error) { +func (c VaultsClient) RegisteredIdentitiesDelete(ctx context.Context, id RegisteredIdentityId) (result RegisteredIdentitiesDeleteOperationResponse, err error) { opts := client.RequestOptions{ ContentType: "application/json; charset=utf-8", ExpectedStatusCodes: []int{ diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/method_replicationusageslist.go b/resource-manager/recoveryservices/2025-02-01/vaults/method_replicationusageslist.go similarity index 82% rename from resource-manager/recoveryservices/2025-02-01/openapis/method_replicationusageslist.go rename to resource-manager/recoveryservices/2025-02-01/vaults/method_replicationusageslist.go index 6c83cbfdbd0..23a8ade529a 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/method_replicationusageslist.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/method_replicationusageslist.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "context" @@ -36,7 +36,7 @@ func (p *ReplicationUsagesListCustomPager) NextPageLink() *odata.Link { } // ReplicationUsagesList ... 
-func (c OpenapisClient) ReplicationUsagesList(ctx context.Context, id VaultId) (result ReplicationUsagesListOperationResponse, err error) { +func (c VaultsClient) ReplicationUsagesList(ctx context.Context, id VaultId) (result ReplicationUsagesListOperationResponse, err error) { opts := client.RequestOptions{ ContentType: "application/json; charset=utf-8", ExpectedStatusCodes: []int{ @@ -75,12 +75,12 @@ func (c OpenapisClient) ReplicationUsagesList(ctx context.Context, id VaultId) ( } // ReplicationUsagesListComplete retrieves all the results into a single object -func (c OpenapisClient) ReplicationUsagesListComplete(ctx context.Context, id VaultId) (ReplicationUsagesListCompleteResult, error) { +func (c VaultsClient) ReplicationUsagesListComplete(ctx context.Context, id VaultId) (ReplicationUsagesListCompleteResult, error) { return c.ReplicationUsagesListCompleteMatchingPredicate(ctx, id, ReplicationUsageOperationPredicate{}) } // ReplicationUsagesListCompleteMatchingPredicate retrieves all the results and then applies the predicate -func (c OpenapisClient) ReplicationUsagesListCompleteMatchingPredicate(ctx context.Context, id VaultId, predicate ReplicationUsageOperationPredicate) (result ReplicationUsagesListCompleteResult, err error) { +func (c VaultsClient) ReplicationUsagesListCompleteMatchingPredicate(ctx context.Context, id VaultId, predicate ReplicationUsageOperationPredicate) (result ReplicationUsagesListCompleteResult, err error) { items := make([]ReplicationUsage, 0) resp, err := c.ReplicationUsagesList(ctx, id) diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/method_vaultsupdate.go b/resource-manager/recoveryservices/2025-02-01/vaults/method_update.go similarity index 61% rename from resource-manager/recoveryservices/2025-02-01/openapis/method_vaultsupdate.go rename to resource-manager/recoveryservices/2025-02-01/vaults/method_update.go index 233a2ac9384..fed13b914bf 100644 --- 
a/resource-manager/recoveryservices/2025-02-01/openapis/method_vaultsupdate.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/method_update.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "context" @@ -14,22 +14,22 @@ import ( // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. -type VaultsUpdateOperationResponse struct { +type UpdateOperationResponse struct { Poller pollers.Poller HttpResponse *http.Response OData *odata.OData Model *Vault } -type VaultsUpdateOperationOptions struct { +type UpdateOperationOptions struct { XMsAuthorizationAuxiliary *string } -func DefaultVaultsUpdateOperationOptions() VaultsUpdateOperationOptions { - return VaultsUpdateOperationOptions{} +func DefaultUpdateOperationOptions() UpdateOperationOptions { + return UpdateOperationOptions{} } -func (o VaultsUpdateOperationOptions) ToHeaders() *client.Headers { +func (o UpdateOperationOptions) ToHeaders() *client.Headers { out := client.Headers{} if o.XMsAuthorizationAuxiliary != nil { out.Append("x-ms-authorization-auxiliary", fmt.Sprintf("%v", *o.XMsAuthorizationAuxiliary)) @@ -37,20 +37,20 @@ func (o VaultsUpdateOperationOptions) ToHeaders() *client.Headers { return &out } -func (o VaultsUpdateOperationOptions) ToOData() *odata.Query { +func (o UpdateOperationOptions) ToOData() *odata.Query { out := odata.Query{} return &out } -func (o VaultsUpdateOperationOptions) ToQuery() *client.QueryParams { +func (o UpdateOperationOptions) ToQuery() *client.QueryParams { out := client.QueryParams{} return &out } -// VaultsUpdate ... -func (c OpenapisClient) VaultsUpdate(ctx context.Context, id VaultId, input PatchVault, options VaultsUpdateOperationOptions) (result VaultsUpdateOperationResponse, err error) { +// Update ... 
+func (c VaultsClient) Update(ctx context.Context, id VaultId, input PatchVault, options UpdateOperationOptions) (result UpdateOperationResponse, err error) { opts := client.RequestOptions{ ContentType: "application/json; charset=utf-8", ExpectedStatusCodes: []int{ @@ -89,15 +89,15 @@ func (c OpenapisClient) VaultsUpdate(ctx context.Context, id VaultId, input Patc return } -// VaultsUpdateThenPoll performs VaultsUpdate then polls until it's completed -func (c OpenapisClient) VaultsUpdateThenPoll(ctx context.Context, id VaultId, input PatchVault, options VaultsUpdateOperationOptions) error { - result, err := c.VaultsUpdate(ctx, id, input, options) +// UpdateThenPoll performs Update then polls until it's completed +func (c VaultsClient) UpdateThenPoll(ctx context.Context, id VaultId, input PatchVault, options UpdateOperationOptions) error { + result, err := c.Update(ctx, id, input, options) if err != nil { - return fmt.Errorf("performing VaultsUpdate: %+v", err) + return fmt.Errorf("performing Update: %+v", err) } if err := result.Poller.PollUntilDone(ctx); err != nil { - return fmt.Errorf("polling after VaultsUpdate: %+v", err) + return fmt.Errorf("polling after Update: %+v", err) } return nil diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/method_usageslistbyvaults.go b/resource-manager/recoveryservices/2025-02-01/vaults/method_usageslistbyvaults.go similarity index 82% rename from resource-manager/recoveryservices/2025-02-01/openapis/method_usageslistbyvaults.go rename to resource-manager/recoveryservices/2025-02-01/vaults/method_usageslistbyvaults.go index 426e3f507e2..386060d255c 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/method_usageslistbyvaults.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/method_usageslistbyvaults.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "context" @@ -36,7 +36,7 @@ func (p *UsagesListByVaultsCustomPager) NextPageLink() *odata.Link { } // UsagesListByVaults ... 
-func (c OpenapisClient) UsagesListByVaults(ctx context.Context, id VaultId) (result UsagesListByVaultsOperationResponse, err error) { +func (c VaultsClient) UsagesListByVaults(ctx context.Context, id VaultId) (result UsagesListByVaultsOperationResponse, err error) { opts := client.RequestOptions{ ContentType: "application/json; charset=utf-8", ExpectedStatusCodes: []int{ @@ -75,12 +75,12 @@ func (c OpenapisClient) UsagesListByVaults(ctx context.Context, id VaultId) (res } // UsagesListByVaultsComplete retrieves all the results into a single object -func (c OpenapisClient) UsagesListByVaultsComplete(ctx context.Context, id VaultId) (UsagesListByVaultsCompleteResult, error) { +func (c VaultsClient) UsagesListByVaultsComplete(ctx context.Context, id VaultId) (UsagesListByVaultsCompleteResult, error) { return c.UsagesListByVaultsCompleteMatchingPredicate(ctx, id, VaultUsageOperationPredicate{}) } // UsagesListByVaultsCompleteMatchingPredicate retrieves all the results and then applies the predicate -func (c OpenapisClient) UsagesListByVaultsCompleteMatchingPredicate(ctx context.Context, id VaultId, predicate VaultUsageOperationPredicate) (result UsagesListByVaultsCompleteResult, err error) { +func (c VaultsClient) UsagesListByVaultsCompleteMatchingPredicate(ctx context.Context, id VaultId, predicate VaultUsageOperationPredicate) (result UsagesListByVaultsCompleteResult, err error) { items := make([]VaultUsage, 0) resp, err := c.UsagesListByVaults(ctx, id) diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/method_vaultcertificatescreate.go b/resource-manager/recoveryservices/2025-02-01/vaults/method_vaultcertificatescreate.go similarity index 85% rename from resource-manager/recoveryservices/2025-02-01/openapis/method_vaultcertificatescreate.go rename to resource-manager/recoveryservices/2025-02-01/vaults/method_vaultcertificatescreate.go index 669bc65aeff..f696af799ff 100644 --- 
a/resource-manager/recoveryservices/2025-02-01/openapis/method_vaultcertificatescreate.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/method_vaultcertificatescreate.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "context" @@ -18,7 +18,7 @@ type VaultCertificatesCreateOperationResponse struct { } // VaultCertificatesCreate ... -func (c OpenapisClient) VaultCertificatesCreate(ctx context.Context, id CertificateId, input CertificateRequest) (result VaultCertificatesCreateOperationResponse, err error) { +func (c VaultsClient) VaultCertificatesCreate(ctx context.Context, id CertificateId, input CertificateRequest) (result VaultCertificatesCreateOperationResponse, err error) { opts := client.RequestOptions{ ContentType: "application/json; charset=utf-8", ExpectedStatusCodes: []int{ diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_associatedidentity.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_associatedidentity.go similarity index 95% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_associatedidentity.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_associatedidentity.go index 93c9b06fef5..3cd9bd3ccd0 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_associatedidentity.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_associatedidentity.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_azuremonitoralertsettings.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_azuremonitoralertsettings.go similarity index 96% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_azuremonitoralertsettings.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_azuremonitoralertsettings.go index 94b88c6cbbe..3f36e0be907 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_azuremonitoralertsettings.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_azuremonitoralertsettings.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_certificaterequest.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_certificaterequest.go similarity index 93% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_certificaterequest.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_certificaterequest.go index c4c7a90b891..201c993aee6 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_certificaterequest.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_certificaterequest.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_classicalertsettings.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_classicalertsettings.go similarity index 95% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_classicalertsettings.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_classicalertsettings.go index b3296423422..bf08cd1a80e 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_classicalertsettings.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_classicalertsettings.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_cmkkekidentity.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_cmkkekidentity.go similarity index 95% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_cmkkekidentity.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_cmkkekidentity.go index e17684754bf..1e5e7be3d2d 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_cmkkekidentity.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_cmkkekidentity.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_cmkkeyvaultproperties.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_cmkkeyvaultproperties.go similarity index 93% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_cmkkeyvaultproperties.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_cmkkeyvaultproperties.go index c6dc3e9f8dd..c53f530b9f5 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_cmkkeyvaultproperties.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_cmkkeyvaultproperties.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_crosssubscriptionrestoresettings.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_crosssubscriptionrestoresettings.go similarity index 94% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_crosssubscriptionrestoresettings.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_crosssubscriptionrestoresettings.go index 0ad7bfef77c..4a4fecb8bdd 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_crosssubscriptionrestoresettings.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_crosssubscriptionrestoresettings.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_immutabilitysettings.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_immutabilitysettings.go similarity index 93% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_immutabilitysettings.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_immutabilitysettings.go index 6c17ac2fdcd..f52ae97a4c3 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_immutabilitysettings.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_immutabilitysettings.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_jobssummary.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_jobssummary.go similarity index 95% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_jobssummary.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_jobssummary.go index bc5f5e9871b..2ba3e5b9c42 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_jobssummary.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_jobssummary.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_monitoringsettings.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_monitoringsettings.go similarity index 95% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_monitoringsettings.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_monitoringsettings.go index 66bde3ad511..391ede46ae2 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_monitoringsettings.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_monitoringsettings.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_monitoringsummary.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_monitoringsummary.go similarity index 97% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_monitoringsummary.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_monitoringsummary.go index ce3b52e87f7..a2305a3ab37 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_monitoringsummary.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_monitoringsummary.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_nameinfo.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_nameinfo.go similarity index 94% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_nameinfo.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_nameinfo.go index 0616124075b..d06544864ad 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_nameinfo.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_nameinfo.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_patchvault.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_patchvault.go similarity index 98% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_patchvault.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_patchvault.go index e5749e343ed..92e3ba243fa 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_patchvault.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_patchvault.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_privateendpoint.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_privateendpoint.go similarity index 92% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_privateendpoint.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_privateendpoint.go index 46f51261dd7..64ce35a3d98 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_privateendpoint.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_privateendpoint.go @@ -1,4 +1,4 
@@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_privateendpointconnection.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_privateendpointconnection.go similarity index 97% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_privateendpointconnection.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_privateendpointconnection.go index 376a862ab34..687aefc111c 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_privateendpointconnection.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_privateendpointconnection.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_privateendpointconnectionvaultproperties.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_privateendpointconnectionvaultproperties.go similarity index 96% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_privateendpointconnectionvaultproperties.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_privateendpointconnectionvaultproperties.go index ca3dff7e97e..5758db1bfa3 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_privateendpointconnectionvaultproperties.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_privateendpointconnectionvaultproperties.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_privatelinkserviceconnectionstate.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_privatelinkserviceconnectionstate.go similarity index 96% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_privatelinkserviceconnectionstate.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_privatelinkserviceconnectionstate.go index aa555c699ca..b07724e0e27 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_privatelinkserviceconnectionstate.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_privatelinkserviceconnectionstate.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_rawcertificatedata.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_rawcertificatedata.go similarity index 94% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_rawcertificatedata.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_rawcertificatedata.go index f50301640b5..045519f350d 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_rawcertificatedata.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_rawcertificatedata.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_replicationusage.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_replicationusage.go similarity index 97% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_replicationusage.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_replicationusage.go index 6178c4ee181..2f98c2fd18e 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_replicationusage.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_replicationusage.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_resourcecertificateandaaddetails.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_resourcecertificateandaaddetails.go similarity index 99% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_resourcecertificateandaaddetails.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_resourcecertificateandaaddetails.go index 69ea360b51c..c252eba5285 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_resourcecertificateandaaddetails.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_resourcecertificateandaaddetails.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "encoding/json" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_resourcecertificateandacsdetails.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_resourcecertificateandacsdetails.go similarity index 99% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_resourcecertificateandacsdetails.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_resourcecertificateandacsdetails.go index 
33ff7871d88..669384d10fa 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_resourcecertificateandacsdetails.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_resourcecertificateandacsdetails.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "encoding/json" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_resourcecertificatedetails.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_resourcecertificatedetails.go similarity index 99% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_resourcecertificatedetails.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_resourcecertificatedetails.go index 42a117e66e2..f560a7058ff 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_resourcecertificatedetails.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_resourcecertificatedetails.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "encoding/json" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_restoresettings.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_restoresettings.go similarity index 94% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_restoresettings.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_restoresettings.go index 2feb2edc80c..b5126dbe263 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_restoresettings.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_restoresettings.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_securitysettings.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_securitysettings.go similarity index 97% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_securitysettings.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_securitysettings.go index bec61002cf6..c32fe717e5f 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_securitysettings.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_securitysettings.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_sku.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_sku.go similarity index 95% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_sku.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_sku.go index f955cfc8e4c..d89e1340ee6 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_sku.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_sku.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_softdeletesettings.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_softdeletesettings.go similarity index 96% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_softdeletesettings.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_softdeletesettings.go index 42934e1c15f..fb257ac28c7 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_softdeletesettings.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_softdeletesettings.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_sourcescanconfiguration.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_sourcescanconfiguration.go similarity index 95% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_sourcescanconfiguration.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_sourcescanconfiguration.go index c8574659d21..4101363f704 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_sourcescanconfiguration.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_sourcescanconfiguration.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_upgradedetails.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_upgradedetails.go similarity index 99% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_upgradedetails.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_upgradedetails.go index 477952e38b8..f5e021983ab 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_upgradedetails.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_upgradedetails.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "time" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_vault.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_vault.go similarity index 98% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_vault.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_vault.go index 62a0f21dff2..dc5d7d049c1 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_vault.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_vault.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "github.com/hashicorp/go-azure-helpers/resourcemanager/identity" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_vaultcertificateresponse.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_vaultcertificateresponse.go similarity index 98% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_vaultcertificateresponse.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_vaultcertificateresponse.go index 5fb27060b5a..ebf37880646 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_vaultcertificateresponse.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_vaultcertificateresponse.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "encoding/json" diff --git 
a/resource-manager/recoveryservices/2025-02-01/openapis/model_vaultproperties.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_vaultproperties.go similarity index 99% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_vaultproperties.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_vaultproperties.go index 7e703b39e7d..5942dc4f409 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_vaultproperties.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_vaultproperties.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_vaultpropertiesencryption.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_vaultpropertiesencryption.go similarity index 96% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_vaultpropertiesencryption.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_vaultpropertiesencryption.go index 0899f3a837d..05c6effec1d 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_vaultpropertiesencryption.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_vaultpropertiesencryption.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_vaultpropertiesmovedetails.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_vaultpropertiesmovedetails.go similarity index 98% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_vaultpropertiesmovedetails.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_vaultpropertiesmovedetails.go index b5a872cf96f..b65be5a0669 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_vaultpropertiesmovedetails.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_vaultpropertiesmovedetails.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "time" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_vaultpropertiesredundancysettings.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_vaultpropertiesredundancysettings.go similarity index 96% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_vaultpropertiesredundancysettings.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_vaultpropertiesredundancysettings.go index 9ba15bd702e..2d1174f221f 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_vaultpropertiesredundancysettings.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_vaultpropertiesredundancysettings.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/model_vaultusage.go b/resource-manager/recoveryservices/2025-02-01/vaults/model_vaultusage.go similarity index 98% rename from resource-manager/recoveryservices/2025-02-01/openapis/model_vaultusage.go rename to resource-manager/recoveryservices/2025-02-01/vaults/model_vaultusage.go index 952df7eb629..296e2fb8f99 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/model_vaultusage.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/model_vaultusage.go @@ -1,4 +1,4 @@ -package openapis +package vaults import ( "time" diff --git a/resource-manager/recoveryservices/2025-02-01/openapis/predicates.go b/resource-manager/recoveryservices/2025-02-01/vaults/predicates.go similarity index 99% rename from resource-manager/recoveryservices/2025-02-01/openapis/predicates.go rename to resource-manager/recoveryservices/2025-02-01/vaults/predicates.go index fbc841f5b8f..5e614d72f8d 100644 --- a/resource-manager/recoveryservices/2025-02-01/openapis/predicates.go +++ b/resource-manager/recoveryservices/2025-02-01/vaults/predicates.go @@ -1,4 +1,4 @@ -package openapis +package vaults // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. diff --git a/resource-manager/recoveryservices/2025-02-01/vaults/version.go b/resource-manager/recoveryservices/2025-02-01/vaults/version.go new file mode 100644 index 00000000000..d3451129474 --- /dev/null +++ b/resource-manager/recoveryservices/2025-02-01/vaults/version.go @@ -0,0 +1,10 @@ +package vaults + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
+ +const defaultApiVersion = "2025-02-01" + +func userAgent() string { + return "hashicorp/go-azure-sdk/vaults/2025-02-01" +} diff --git a/resource-manager/redisenterprise/2025-04-01/accesspolicyassignments/model_accesspolicyassignment.go b/resource-manager/redisenterprise/2025-04-01/accesspolicyassignments/model_accesspolicyassignment.go index 7a3791c9374..6caa7fbc508 100644 --- a/resource-manager/redisenterprise/2025-04-01/accesspolicyassignments/model_accesspolicyassignment.go +++ b/resource-manager/redisenterprise/2025-04-01/accesspolicyassignments/model_accesspolicyassignment.go @@ -1,9 +1,5 @@ package accesspolicyassignments -import ( - "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" -) - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. @@ -11,6 +7,5 @@ type AccessPolicyAssignment struct { Id *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` Properties *AccessPolicyAssignmentProperties `json:"properties,omitempty"` - SystemData *systemdata.SystemData `json:"systemData,omitempty"` Type *string `json:"type,omitempty"` } diff --git a/resource-manager/redisenterprise/2025-04-01/databases/model_accesspolicyassignment.go b/resource-manager/redisenterprise/2025-04-01/databases/model_accesspolicyassignment.go index a87ffb93b55..fd4718ad413 100644 --- a/resource-manager/redisenterprise/2025-04-01/databases/model_accesspolicyassignment.go +++ b/resource-manager/redisenterprise/2025-04-01/databases/model_accesspolicyassignment.go @@ -1,9 +1,5 @@ package databases -import ( - "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" -) - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
@@ -11,6 +7,5 @@ type AccessPolicyAssignment struct { Id *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` Properties *AccessPolicyAssignmentProperties `json:"properties,omitempty"` - SystemData *systemdata.SystemData `json:"systemData,omitempty"` Type *string `json:"type,omitempty"` } diff --git a/resource-manager/redisenterprise/2025-04-01/databases/model_database.go b/resource-manager/redisenterprise/2025-04-01/databases/model_database.go index f656e011163..a9880a7432b 100644 --- a/resource-manager/redisenterprise/2025-04-01/databases/model_database.go +++ b/resource-manager/redisenterprise/2025-04-01/databases/model_database.go @@ -1,16 +1,11 @@ package databases -import ( - "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" -) - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. type Database struct { - Id *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Properties *DatabaseProperties `json:"properties,omitempty"` - SystemData *systemdata.SystemData `json:"systemData,omitempty"` - Type *string `json:"type,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DatabaseProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` } diff --git a/resource-manager/redisenterprise/2025-04-01/privateendpointconnections/model_privateendpointconnection.go b/resource-manager/redisenterprise/2025-04-01/privateendpointconnections/model_privateendpointconnection.go index 977fe056fe3..fde7f4db0fc 100644 --- a/resource-manager/redisenterprise/2025-04-01/privateendpointconnections/model_privateendpointconnection.go +++ b/resource-manager/redisenterprise/2025-04-01/privateendpointconnections/model_privateendpointconnection.go @@ -1,9 +1,5 @@ package privateendpointconnections -import ( - "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" -) - 
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. @@ -11,6 +7,5 @@ type PrivateEndpointConnection struct { Id *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` Properties *PrivateEndpointConnectionProperties `json:"properties,omitempty"` - SystemData *systemdata.SystemData `json:"systemData,omitempty"` Type *string `json:"type,omitempty"` } diff --git a/resource-manager/redisenterprise/2025-04-01/privatelinkresources/model_privatelinkresource.go b/resource-manager/redisenterprise/2025-04-01/privatelinkresources/model_privatelinkresource.go index 6e8c3d8148b..69e8ae0e57a 100644 --- a/resource-manager/redisenterprise/2025-04-01/privatelinkresources/model_privatelinkresource.go +++ b/resource-manager/redisenterprise/2025-04-01/privatelinkresources/model_privatelinkresource.go @@ -1,9 +1,5 @@ package privatelinkresources -import ( - "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" -) - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
@@ -11,6 +7,5 @@ type PrivateLinkResource struct { Id *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` Properties *PrivateLinkResourceProperties `json:"properties,omitempty"` - SystemData *systemdata.SystemData `json:"systemData,omitempty"` Type *string `json:"type,omitempty"` } diff --git a/resource-manager/redisenterprise/2025-04-01/redisenterprise/model_accesspolicyassignment.go b/resource-manager/redisenterprise/2025-04-01/redisenterprise/model_accesspolicyassignment.go index 5a91bc02c11..0b0a3710006 100644 --- a/resource-manager/redisenterprise/2025-04-01/redisenterprise/model_accesspolicyassignment.go +++ b/resource-manager/redisenterprise/2025-04-01/redisenterprise/model_accesspolicyassignment.go @@ -1,9 +1,5 @@ package redisenterprise -import ( - "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" -) - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. @@ -11,6 +7,5 @@ type AccessPolicyAssignment struct { Id *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` Properties *AccessPolicyAssignmentProperties `json:"properties,omitempty"` - SystemData *systemdata.SystemData `json:"systemData,omitempty"` Type *string `json:"type,omitempty"` } diff --git a/resource-manager/redisenterprise/2025-04-01/redisenterprise/model_database.go b/resource-manager/redisenterprise/2025-04-01/redisenterprise/model_database.go index 1fc4662017d..dd20d0b355a 100644 --- a/resource-manager/redisenterprise/2025-04-01/redisenterprise/model_database.go +++ b/resource-manager/redisenterprise/2025-04-01/redisenterprise/model_database.go @@ -1,16 +1,11 @@ package redisenterprise -import ( - "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" -) - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. 
type Database struct { - Id *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Properties *DatabaseProperties `json:"properties,omitempty"` - SystemData *systemdata.SystemData `json:"systemData,omitempty"` - Type *string `json:"type,omitempty"` + Id *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DatabaseProperties `json:"properties,omitempty"` + Type *string `json:"type,omitempty"` } diff --git a/resource-manager/redisenterprise/2025-04-01/redisenterprise/model_privateendpointconnection.go b/resource-manager/redisenterprise/2025-04-01/redisenterprise/model_privateendpointconnection.go index 12beb91b747..5da2f77534d 100644 --- a/resource-manager/redisenterprise/2025-04-01/redisenterprise/model_privateendpointconnection.go +++ b/resource-manager/redisenterprise/2025-04-01/redisenterprise/model_privateendpointconnection.go @@ -1,9 +1,5 @@ package redisenterprise -import ( - "github.com/hashicorp/go-azure-helpers/resourcemanager/systemdata" -) - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. @@ -11,6 +7,5 @@ type PrivateEndpointConnection struct { Id *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` Properties *PrivateEndpointConnectionProperties `json:"properties,omitempty"` - SystemData *systemdata.SystemData `json:"systemData,omitempty"` Type *string `json:"type,omitempty"` } diff --git a/resource-manager/storage/2025-01-01/storageaccounts/model_dualstackendpointpreference.go b/resource-manager/storage/2025-01-01/storageaccounts/model_dualstackendpointpreference.go new file mode 100644 index 00000000000..d4b6f5cfc9e --- /dev/null +++ b/resource-manager/storage/2025-01-01/storageaccounts/model_dualstackendpointpreference.go @@ -0,0 +1,8 @@ +package storageaccounts + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See NOTICE.txt in the project root for license information. + +type DualStackEndpointPreference struct { + PublishIPv6Endpoint *bool `json:"publishIpv6Endpoint,omitempty"` +} diff --git a/resource-manager/storage/2025-01-01/storageaccounts/model_endpoints.go b/resource-manager/storage/2025-01-01/storageaccounts/model_endpoints.go index 478d010d562..7c09b4be07c 100644 --- a/resource-manager/storage/2025-01-01/storageaccounts/model_endpoints.go +++ b/resource-manager/storage/2025-01-01/storageaccounts/model_endpoints.go @@ -7,6 +7,7 @@ type Endpoints struct { Blob *string `json:"blob,omitempty"` Dfs *string `json:"dfs,omitempty"` File *string `json:"file,omitempty"` + IPv6Endpoints *StorageAccountIPv6Endpoints `json:"ipv6Endpoints,omitempty"` InternetEndpoints *StorageAccountInternetEndpoints `json:"internetEndpoints,omitempty"` MicrosoftEndpoints *StorageAccountMicrosoftEndpoints `json:"microsoftEndpoints,omitempty"` Queue *string `json:"queue,omitempty"` diff --git a/resource-manager/storage/2025-01-01/storageaccounts/model_networkruleset.go b/resource-manager/storage/2025-01-01/storageaccounts/model_networkruleset.go index fcbda347bd2..09add3be371 100644 --- a/resource-manager/storage/2025-01-01/storageaccounts/model_networkruleset.go +++ b/resource-manager/storage/2025-01-01/storageaccounts/model_networkruleset.go @@ -7,6 +7,7 @@ type NetworkRuleSet struct { Bypass *Bypass `json:"bypass,omitempty"` DefaultAction DefaultAction `json:"defaultAction"` IPRules *[]IPRule `json:"ipRules,omitempty"` + IPv6Rules *[]IPRule `json:"ipv6Rules,omitempty"` ResourceAccessRules *[]ResourceAccessRule `json:"resourceAccessRules,omitempty"` VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"` } diff --git a/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountipv6endpoints.go b/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountipv6endpoints.go new file mode 100644 index 00000000000..4250202d044 --- /dev/null +++ 
b/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountipv6endpoints.go @@ -0,0 +1,15 @@ +package storageaccounts + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See NOTICE.txt in the project root for license information. + +type StorageAccountIPv6Endpoints struct { + Blob *string `json:"blob,omitempty"` + Dfs *string `json:"dfs,omitempty"` + File *string `json:"file,omitempty"` + InternetEndpoints *StorageAccountInternetEndpoints `json:"internetEndpoints,omitempty"` + MicrosoftEndpoints *StorageAccountMicrosoftEndpoints `json:"microsoftEndpoints,omitempty"` + Queue *string `json:"queue,omitempty"` + Table *string `json:"table,omitempty"` + Web *string `json:"web,omitempty"` +} diff --git a/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountproperties.go b/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountproperties.go index 4adc561a27b..1abf4c211d1 100644 --- a/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountproperties.go +++ b/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountproperties.go @@ -22,6 +22,7 @@ type StorageAccountProperties struct { CustomDomain *CustomDomain `json:"customDomain,omitempty"` DefaultToOAuthAuthentication *bool `json:"defaultToOAuthAuthentication,omitempty"` DnsEndpointType *DnsEndpointType `json:"dnsEndpointType,omitempty"` + DualStackEndpointPreference *DualStackEndpointPreference `json:"dualStackEndpointPreference,omitempty"` EnableExtendedGroups *bool `json:"enableExtendedGroups,omitempty"` Encryption *Encryption `json:"encryption,omitempty"` FailoverInProgress *bool `json:"failoverInProgress,omitempty"` diff --git a/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountpropertiescreateparameters.go b/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountpropertiescreateparameters.go index b514adb0b4b..9ebc27e65d8 100644 --- 
a/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountpropertiescreateparameters.go +++ b/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountpropertiescreateparameters.go @@ -1,11 +1,5 @@ package storageaccounts -import ( - "time" - - "github.com/hashicorp/go-azure-helpers/lang/dates" -) - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See NOTICE.txt in the project root for license information. @@ -18,8 +12,8 @@ type StorageAccountPropertiesCreateParameters struct { AzureFilesIdentityBasedAuthentication *AzureFilesIdentityBasedAuthentication `json:"azureFilesIdentityBasedAuthentication,omitempty"` CustomDomain *CustomDomain `json:"customDomain,omitempty"` DefaultToOAuthAuthentication *bool `json:"defaultToOAuthAuthentication,omitempty"` - DeletedAccountCreationTime *string `json:"deletedAccountCreationTime,omitempty"` DnsEndpointType *DnsEndpointType `json:"dnsEndpointType,omitempty"` + DualStackEndpointPreference *DualStackEndpointPreference `json:"dualStackEndpointPreference,omitempty"` EnableExtendedGroups *bool `json:"enableExtendedGroups,omitempty"` Encryption *Encryption `json:"encryption,omitempty"` ImmutableStorageWithVersioning *ImmutableStorageAccount `json:"immutableStorageWithVersioning,omitempty"` @@ -36,15 +30,3 @@ type StorageAccountPropertiesCreateParameters struct { SasPolicy *SasPolicy `json:"sasPolicy,omitempty"` SupportsHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` } - -func (o *StorageAccountPropertiesCreateParameters) GetDeletedAccountCreationTimeAsTime() (*time.Time, error) { - if o.DeletedAccountCreationTime == nil { - return nil, nil - } - return dates.ParseAsFormat(o.DeletedAccountCreationTime, "2006-01-02T15:04:05Z07:00") -} - -func (o *StorageAccountPropertiesCreateParameters) SetDeletedAccountCreationTimeAsTime(input time.Time) { - formatted := input.Format("2006-01-02T15:04:05Z07:00") - o.DeletedAccountCreationTime = 
&formatted -} diff --git a/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountpropertiesupdateparameters.go b/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountpropertiesupdateparameters.go index fef86b8f536..9d3dda7b65a 100644 --- a/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountpropertiesupdateparameters.go +++ b/resource-manager/storage/2025-01-01/storageaccounts/model_storageaccountpropertiesupdateparameters.go @@ -13,6 +13,7 @@ type StorageAccountPropertiesUpdateParameters struct { CustomDomain *CustomDomain `json:"customDomain,omitempty"` DefaultToOAuthAuthentication *bool `json:"defaultToOAuthAuthentication,omitempty"` DnsEndpointType *DnsEndpointType `json:"dnsEndpointType,omitempty"` + DualStackEndpointPreference *DualStackEndpointPreference `json:"dualStackEndpointPreference,omitempty"` EnableExtendedGroups *bool `json:"enableExtendedGroups,omitempty"` Encryption *Encryption `json:"encryption,omitempty"` ImmutableStorageWithVersioning *ImmutableStorageAccount `json:"immutableStorageWithVersioning,omitempty"`